repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
waterdotorg/power.Water | project/custom/migrations/0011_auto__add_friendjoinedemaillog__add_unique_friendjoinedemaillog_user_u.py | 1 | 13099 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South schema migration.

    Adds the FriendJoinedEmailLog model and enforces uniqueness of the
    (user, user_referred) pair on both the new FriendJoinedEmailLog table
    and the pre-existing FacebookOGReferredLog table.
    """

    def forwards(self, orm):
        """Apply the migration: create the table, then the unique constraints."""
        # Adding model 'FriendJoinedEmailLog'
        db.create_table('custom_friendjoinedemaillog', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='user_fje', to=orm['auth.User'])),
            ('user_referred', self.gf('django.db.models.fields.related.ForeignKey')(related_name='user_referred_fje', to=orm['auth.User'])),
            ('created_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('updated_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ))
        db.send_create_signal('custom', ['FriendJoinedEmailLog'])

        # Adding unique constraint on 'FriendJoinedEmailLog', fields ['user', 'user_referred']
        db.create_unique('custom_friendjoinedemaillog', ['user_id', 'user_referred_id'])

        # Adding unique constraint on 'FacebookOGReferredLog', fields ['user', 'user_referred']
        db.create_unique('custom_facebookogreferredlog', ['user_id', 'user_referred_id'])

    def backwards(self, orm):
        """Revert the migration: drop constraints first, then the table."""
        # Removing unique constraint on 'FacebookOGReferredLog', fields ['user', 'user_referred']
        db.delete_unique('custom_facebookogreferredlog', ['user_id', 'user_referred_id'])

        # Removing unique constraint on 'FriendJoinedEmailLog', fields ['user', 'user_referred']
        db.delete_unique('custom_friendjoinedemaillog', ['user_id', 'user_referred_id'])

        # Deleting model 'FriendJoinedEmailLog'
        db.delete_table('custom_friendjoinedemaillog')

    # Frozen ORM state: the serialized model definitions South uses to build
    # the `orm` argument passed to forwards()/backwards().
    # Auto-generated -- do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'custom.facebookogreferredlog': {
            'Meta': {'unique_together': "(('user', 'user_referred'),)", 'object_name': 'FacebookOGReferredLog'},
            'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'success': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user'", 'to': "orm['auth.User']"}),
            'user_referred': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_referred'", 'to': "orm['auth.User']"})
        },
        'custom.facebookstatusupdate': {
            'Meta': {'object_name': 'FacebookStatusUpdate'},
            'caption': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'end_date': ('django.db.models.fields.DateTimeField', [], {}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'start_date': ('django.db.models.fields.DateTimeField', [], {}),
            'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'custom.facebookstatusupdatelog': {
            'Meta': {'unique_together': "(('facebook_status_update', 'user'),)", 'object_name': 'FacebookStatusUpdateLog'},
            'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'facebook_status_update': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['custom.FacebookStatusUpdate']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'custom.friendjoinedemaillog': {
            'Meta': {'unique_together': "(('user', 'user_referred'),)", 'object_name': 'FriendJoinedEmailLog'},
            'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_fje'", 'to': "orm['auth.User']"}),
            'user_referred': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_referred_fje'", 'to': "orm['auth.User']"})
        },
        'custom.post': {
            'Meta': {'object_name': 'Post'},
            'content': ('django.db.models.fields.TextField', [], {}),
            'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'homepage': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '256'}),
            'published_date': ('django.db.models.fields.DateTimeField', [], {}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'custom.profile': {
            'Meta': {'object_name': 'Profile'},
            'enable_email_updates': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'enable_facebook_updates': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'enable_twitter_updates': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'followers': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'blank': 'True'}),
            'semaphore_facebook': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'semaphore_twitter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'social_data_completed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'source_referrer': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
            'user_referrer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user_referrer'", 'null': 'True', 'to': "orm['auth.User']"})
        },
        'custom.twitterautofriendshiplog': {
            'Meta': {'object_name': 'TwitterAutoFriendshipLog'},
            'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'success': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'custom.twitterstatusupdate': {
            'Meta': {'object_name': 'TwitterStatusUpdate'},
            'content': ('django.db.models.fields.TextField', [], {}),
            'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'end_date': ('django.db.models.fields.DateTimeField', [], {}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'start_date': ('django.db.models.fields.DateTimeField', [], {}),
            'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'custom.twitterstatusupdatelog': {
            'Meta': {'unique_together': "(('twitter_status_update', 'user'),)", 'object_name': 'TwitterStatusUpdateLog'},
            'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'twitter_status_update': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['custom.TwitterStatusUpdate']"}),
            'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        }
    }
    complete_apps = ['custom']
indevgr/django | tests/model_formsets/models.py | 118 | 7878 | from __future__ import unicode_literals
import datetime
import uuid
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Author(models.Model):
    # Base author model used throughout the formset tests.
    name = models.CharField(max_length=100)

    class Meta:
        ordering = ('name',)

    def __str__(self):
        return self.name


class BetterAuthor(Author):
    # Multi-table-inheritance child of Author (adds one extra field).
    write_speed = models.IntegerField()


@python_2_unicode_compatible
class Book(models.Model):
    author = models.ForeignKey(Author, models.CASCADE)
    title = models.CharField(max_length=100)

    class Meta:
        unique_together = (
            ('author', 'title'),
        )
        ordering = ['id']

    def __str__(self):
        return self.title

    def clean(self):
        # Ensure author is always accessible in clean method
        assert self.author.name is not None


@python_2_unicode_compatible
class BookWithCustomPK(models.Model):
    # Uses an explicit DecimalField primary key instead of the default AutoField.
    my_pk = models.DecimalField(max_digits=5, decimal_places=0, primary_key=True)
    author = models.ForeignKey(Author, models.CASCADE)
    title = models.CharField(max_length=100)

    def __str__(self):
        return '%s: %s' % (self.my_pk, self.title)
class Editor(models.Model):
    name = models.CharField(max_length=100)


@python_2_unicode_compatible
class BookWithOptionalAltEditor(models.Model):
    author = models.ForeignKey(Author, models.CASCADE)
    # Optional secondary author
    alt_editor = models.ForeignKey(Editor, models.SET_NULL, blank=True, null=True)
    title = models.CharField(max_length=100)

    class Meta:
        # Unique constraint that includes a nullable FK column.
        unique_together = (
            ('author', 'title', 'alt_editor'),
        )

    def __str__(self):
        return self.title


@python_2_unicode_compatible
class AlternateBook(Book):
    # Inheritance child of Book with an extra notes field.
    notes = models.CharField(max_length=100)

    def __str__(self):
        return '%s - %s' % (self.title, self.notes)


@python_2_unicode_compatible
class AuthorMeeting(models.Model):
    name = models.CharField(max_length=100)
    authors = models.ManyToManyField(Author)
    # Non-editable field: excluded from generated model forms.
    created = models.DateField(editable=False)

    def __str__(self):
        return self.name


class CustomPrimaryKey(models.Model):
    # Character-based explicit primary key.
    my_pk = models.CharField(max_length=10, primary_key=True)
    some_field = models.CharField(max_length=100)
# models for inheritance tests.
@python_2_unicode_compatible
class Place(models.Model):
    name = models.CharField(max_length=50)
    city = models.CharField(max_length=50)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Owner(models.Model):
    # Explicitly named AutoField primary key.
    auto_id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=100)
    place = models.ForeignKey(Place, models.CASCADE)

    def __str__(self):
        return "%s at %s" % (self.name, self.place)


class Location(models.Model):
    # unique=True FK -- effectively a one-to-one declared as ForeignKey.
    place = models.ForeignKey(Place, models.CASCADE, unique=True)
    # this is purely for testing the data doesn't matter here :)
    lat = models.CharField(max_length=100)
    lon = models.CharField(max_length=100)


@python_2_unicode_compatible
class OwnerProfile(models.Model):
    # OneToOneField doubling as the primary key.
    owner = models.OneToOneField(Owner, models.CASCADE, primary_key=True)
    age = models.PositiveIntegerField()

    def __str__(self):
        return "%s is %d" % (self.owner.name, self.age)


@python_2_unicode_compatible
class Restaurant(Place):
    serves_pizza = models.BooleanField(default=False)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Product(models.Model):
    slug = models.SlugField(unique=True)

    def __str__(self):
        return self.slug


@python_2_unicode_compatible
class Price(models.Model):
    price = models.DecimalField(max_digits=10, decimal_places=2)
    quantity = models.PositiveIntegerField()

    def __str__(self):
        return "%s for %s" % (self.quantity, self.price)

    class Meta:
        unique_together = (('price', 'quantity'),)


class MexicanRestaurant(Restaurant):
    # Second level of multi-table inheritance (Place -> Restaurant -> here).
    serves_tacos = models.BooleanField(default=False)


class ClassyMexicanRestaurant(MexicanRestaurant):
    # Explicit parent_link OneToOneField used as the primary key.
    restaurant = models.OneToOneField(MexicanRestaurant, models.CASCADE, parent_link=True, primary_key=True)
    tacos_are_yummy = models.BooleanField(default=False)
# models for testing unique_together validation when a fk is involved and
# using inlineformset_factory.
@python_2_unicode_compatible
class Repository(models.Model):
    name = models.CharField(max_length=25)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Revision(models.Model):
    repository = models.ForeignKey(Repository, models.CASCADE)
    revision = models.CharField(max_length=40)

    class Meta:
        unique_together = (("repository", "revision"),)

    def __str__(self):
        return "%s (%s)" % (self.revision, six.text_type(self.repository))


# models for testing callable defaults (see bug #7975). If you define a model
# with a callable default value, you cannot rely on the initial value in a
# form.
class Person(models.Model):
    name = models.CharField(max_length=128)


class Membership(models.Model):
    person = models.ForeignKey(Person, models.CASCADE)
    # Callable default: evaluated per-instance at save/form time.
    date_joined = models.DateTimeField(default=datetime.datetime.now)
    karma = models.IntegerField()
# models for testing a null=True fk to a parent
class Team(models.Model):
    name = models.CharField(max_length=100)


@python_2_unicode_compatible
class Player(models.Model):
    # Nullable FK: a player may exist without a team.
    team = models.ForeignKey(Team, models.SET_NULL, null=True)
    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name


# Models for testing custom ModelForm save methods in formsets and inline formsets
@python_2_unicode_compatible
class Poet(models.Model):
    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Poem(models.Model):
    poet = models.ForeignKey(Poet, models.CASCADE)
    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Post(models.Model):
    """Model exercising unique_for_date/month/year constraints on 'posted'."""
    title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
    slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
    subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
    posted = models.DateField()

    def __str__(self):
        # Bug fix: this model has no 'name' field, so the previous
        # `return self.name` raised AttributeError whenever str() was taken.
        # Return the title field instead, matching the other models here.
        return self.title
# Models for testing UUID primary keys
class UUIDPKParent(models.Model):
    # Non-editable UUID primary key with a callable default.
    uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    name = models.CharField(max_length=255)


class UUIDPKChild(models.Model):
    uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    name = models.CharField(max_length=255)
    parent = models.ForeignKey(UUIDPKParent, models.CASCADE)


class ChildWithEditablePK(models.Model):
    # User-supplied (editable) character primary key.
    name = models.CharField(max_length=255, primary_key=True)
    parent = models.ForeignKey(UUIDPKParent, models.CASCADE)


class AutoPKChildOfUUIDPKParent(models.Model):
    name = models.CharField(max_length=255)
    parent = models.ForeignKey(UUIDPKParent, models.CASCADE)


class AutoPKParent(models.Model):
    name = models.CharField(max_length=255)


class UUIDPKChildOfAutoPKParent(models.Model):
    uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    name = models.CharField(max_length=255)
    parent = models.ForeignKey(AutoPKParent, models.CASCADE)


class ParentWithUUIDAlternateKey(models.Model):
    # UUID is unique but NOT the primary key; children link to it via to_field.
    uuid = models.UUIDField(unique=True, default=uuid.uuid4, editable=False)
    name = models.CharField(max_length=50)


class ChildRelatedViaAK(models.Model):
    name = models.CharField(max_length=255)
    parent = models.ForeignKey(ParentWithUUIDAlternateKey, models.CASCADE, to_field='uuid')
| bsd-3-clause |
axinging/chromium-crosswalk | chrome/common/extensions/docs/server2/branch_utility.py | 29 | 8453 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import operator
from environment_wrappers import CreateUrlFetcher
import url_constants
class ChannelInfo(object):
  '''Represents a Chrome channel with three pieces of information. |channel| is
  one of 'stable', 'beta', 'dev', or 'master'. |branch| and |version| correspond
  with each other, and represent different releases of Chrome. Note that
  |branch| and |version| can occasionally be the same for separate channels
  (i.e. 'beta' and 'dev'), so all three fields are required to uniquely
  identify a channel.
  '''

  def __init__(self, channel, branch, version):
    # NOTE(review): basestring is Python 2 only -- this module predates
    # Python 3 support.
    assert isinstance(channel, basestring), channel
    assert isinstance(branch, basestring), branch
    # TODO(kalman): Assert that this is a string. One day Chromium will probably
    # be served out of a git repository and the versions will no longer be ints.
    assert isinstance(version, int) or version == 'master', version
    self.channel = channel
    self.branch = branch
    self.version = version

  def __eq__(self, other):
    # Structural equality over all three fields.
    return self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)

  def __repr__(self):
    return '%s%s' % (type(self).__name__, repr(self.__dict__))

  def __str__(self):
    return repr(self)
class BranchUtility(object):
  '''Provides methods for working with Chrome channel, branch, and version
  data served from OmahaProxy.
  '''

  def __init__(self, fetch_url, history_url, fetcher, object_store_creator):
    self._fetcher = fetcher
    # Separate caches so branch and version entries never collide on key.
    def create_object_store(category):
      return object_store_creator.Create(BranchUtility, category=category)
    self._branch_object_store = create_object_store('branch')
    self._version_object_store = create_object_store('version')
    # Kick off both fetches eagerly; results are awaited lazily via .Get()
    # only when a cache miss makes them necessary.
    self._fetch_result = self._fetcher.FetchAsync(fetch_url)
    self._history_result = self._fetcher.FetchAsync(history_url)

  @staticmethod
  def Create(object_store_creator):
    '''Factory wiring in the production OmahaProxy URLs and URL fetcher.'''
    return BranchUtility(url_constants.OMAHA_PROXY_URL,
                         url_constants.OMAHA_DEV_HISTORY,
                         CreateUrlFetcher(),
                         object_store_creator)

  @staticmethod
  def GetAllChannelNames():
    # Ordered oldest -> newest; Newer()/Older()/NewestChannel() rely on this.
    return ('stable', 'beta', 'dev', 'master')

  @staticmethod
  def NewestChannel(channels):
    '''Returns the most recent channel name present in |channels|.'''
    channels = set(channels)
    for channel in reversed(BranchUtility.GetAllChannelNames()):
      if channel in channels:
        return channel

  def Newer(self, channel_info):
    '''Given a ChannelInfo object, returns a new ChannelInfo object
    representing the next most recent Chrome version/branch combination.
    '''
    if channel_info.channel == 'master':
      # Nothing is newer than master.
      return None
    if channel_info.channel == 'stable':
      stable_info = self.GetChannelInfo('stable')
      if channel_info.version < stable_info.version:
        # An old stable release: advance one stable version at a time.
        return self.GetStableChannelInfo(channel_info.version + 1)
    names = self.GetAllChannelNames()
    return self.GetAllChannelInfo()[names.index(channel_info.channel) + 1]

  def Older(self, channel_info):
    '''Given a ChannelInfo object, returns a new ChannelInfo object
    representing the previous Chrome version/branch combination.
    '''
    if channel_info.channel == 'stable':
      if channel_info.version <= 5:
        # BranchUtility can't access branch data from before Chrome version 5.
        return None
      return self.GetStableChannelInfo(channel_info.version - 1)
    names = self.GetAllChannelNames()
    return self.GetAllChannelInfo()[names.index(channel_info.channel) - 1]

  @staticmethod
  def SplitChannelNameFromPath(path):
    '''Splits the channel name out of |path|, returning the tuple
    (channel_name, real_path). If the channel cannot be determined then returns
    (None, path).
    '''
    if '/' in path:
      first, second = path.split('/', 1)
    else:
      first, second = (path, '')
    if first in BranchUtility.GetAllChannelNames():
      return (first, second)
    return (None, path)

  def GetAllBranches(self):
    '''Returns a (channel, branch) tuple for every known channel.'''
    return tuple((channel, self.GetChannelInfo(channel).branch)
                 for channel in BranchUtility.GetAllChannelNames())

  def GetAllVersions(self):
    '''Returns the current version of every known channel.'''
    return tuple(self.GetChannelInfo(channel).version
                 for channel in BranchUtility.GetAllChannelNames())

  def GetAllChannelInfo(self):
    return tuple(self.GetChannelInfo(channel)
                 for channel in BranchUtility.GetAllChannelNames())

  def GetChannelInfo(self, channel):
    '''Returns the ChannelInfo for |channel| from OmahaProxy data.'''
    version = self._ExtractFromVersionJson(channel, 'version')
    # Versions are ints except for the pseudo-version 'master'.
    if version != 'master':
      version = int(version)
    return ChannelInfo(channel,
                       self._ExtractFromVersionJson(channel, 'branch'),
                       version)

  def GetStableChannelInfo(self, version):
    '''Given a |version| corresponding to a 'stable' version of Chrome, returns
    a ChannelInfo object representing that version.
    '''
    return ChannelInfo('stable', self.GetBranchForVersion(version), version)

  def _ExtractFromVersionJson(self, channel_name, data_type):
    '''Returns the branch or version number for a channel name.
    '''
    if channel_name == 'master':
      return 'master'
    if data_type == 'branch':
      object_store = self._branch_object_store
    elif data_type == 'version':
      object_store = self._version_object_store
    # Serve from cache when possible.
    data = object_store.Get(channel_name).Get()
    if data is not None:
      return data
    try:
      version_json = json.loads(self._fetch_result.Get().content)
    except Exception as e:
      # This can happen if omahaproxy is misbehaving, which we've seen before.
      # Quick hack fix: just serve from master until it's fixed.
      logging.error('Failed to fetch or parse branch from omahaproxy: %s! '
                    'Falling back to "master".' % e)
      return 'master'
    # Tally how often each branch/version number appears across the desktop
    # platforms for this channel, then pick the most common one.
    numbers = {}
    for entry in version_json:
      if entry['os'] not in ('win', 'linux', 'mac', 'cros'):
        continue
      for version in entry['versions']:
        if version['channel'] != channel_name:
          continue
        if data_type == 'branch':
          number = version['version'].split('.')[2]
        elif data_type == 'version':
          number = version['version'].split('.')[0]
        if number not in numbers:
          numbers[number] = 0
        else:
          numbers[number] += 1
    # NOTE(review): dict.iteritems() is Python 2 only; this file predates
    # Python 3 support.
    sorted_numbers = sorted(numbers.iteritems(),
                            key=operator.itemgetter(1),
                            reverse=True)
    object_store.Set(channel_name, sorted_numbers[0][0])
    return sorted_numbers[0][0]

  def GetBranchForVersion(self, version):
    '''Returns the most recent branch for a given chrome version number using
    data stored on omahaproxy (see url_constants).
    '''
    if version == 'master':
      return 'master'
    branch = self._branch_object_store.Get(str(version)).Get()
    if branch is not None:
      return branch
    version_json = json.loads(self._history_result.Get().content)
    for entry in version_json:
      # Version strings look like 'major.minor.branch.patch'.
      version_title = entry['version'].split('.')
      if version_title[0] == str(version):
        self._branch_object_store.Set(str(version), version_title[2])
        return version_title[2]
    raise ValueError('The branch for %s could not be found.' % version)

  def GetChannelForVersion(self, version):
    '''Returns the name of the development channel corresponding to a given
    version number.
    '''
    for channel_info in self.GetAllChannelInfo():
      # Anything at or below the current stable version counts as 'stable'.
      if channel_info.channel == 'stable' and version <= channel_info.version:
        return channel_info.channel
      if version == channel_info.version:
        return channel_info.channel

  def GetLatestVersionNumber(self):
    '''Returns the most recent version number found using data stored on
    omahaproxy.
    '''
    latest_version = self._version_object_store.Get('latest').Get()
    if latest_version is not None:
      return latest_version
    version_json = json.loads(self._history_result.Get().content)
    latest_version = 0
    for entry in version_json:
      version_title = entry['version'].split('.')
      version = int(version_title[0])
      if version > latest_version:
        latest_version = version
    self._version_object_store.Set('latest', latest_version)
    return latest_version
| bsd-3-clause |
MyAOSP/external_chromium_org | tools/perf/metrics/speedindex.py | 23 | 12282 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import os
from metrics import Metric
class SpeedIndexMetric(Metric):
  """The speed index metric is one way of measuring page load speed.

  It is meant to approximate user perception of page load speed, and it
  is based on the amount of time that it takes to paint to the visual
  portion of the screen. It includes paint events that occur after the
  onload event, and it doesn't include time loading things off-screen.

  This speed index metric is based on WebPageTest.org (WPT).
  For more info see: http://goo.gl/e7AH5l
  """
  def __init__(self):
    super(SpeedIndexMetric, self).__init__()
    self._impl = None
    self._script_is_loaded = False
    self._is_finished = False
    # speedindex.js supplies window.timeSinceLastResponseAfterLoadMs(),
    # used by IsFinished() below.
    with open(os.path.join(os.path.dirname(__file__), 'speedindex.js')) as f:
      self._js = f.read()

  def Start(self, _, tab):
    """Start recording events.

    This method should be called in the WillNavigateToPage method of
    a PageMeasurement, so that all the events can be captured. If it's called
    in DidNavigateToPage, that will be too late.
    """
    # Prefer the video-capture implementation when the platform supports it;
    # otherwise fall back to timeline paint-rect events.
    self._impl = (VideoSpeedIndexImpl(tab) if tab.video_capture_supported else
                  PaintRectSpeedIndexImpl(tab))
    self._impl.Start()
    self._script_is_loaded = False
    self._is_finished = False

  def Stop(self, _, tab):
    """Stop timeline recording."""
    assert self._impl, 'Must call Start() before Stop()'
    assert self.IsFinished(tab), 'Must wait for IsFinished() before Stop()'
    self._impl.Stop()

  # Optional argument chart_name is not in base class Metric.
  # pylint: disable=W0221
  def AddResults(self, tab, results, chart_name=None):
    """Calculate the speed index and add it to the results."""
    index = self._impl.CalculateSpeedIndex()
    # Release the tab so that it can be disconnected.
    self._impl = None
    results.Add('speed_index', 'ms', index, chart_name=chart_name)

  def IsFinished(self, tab):
    """Decide whether the timeline recording should be stopped.

    When the timeline recording is stopped determines which paint events
    are used in the speed index metric calculation. In general, the recording
    should continue if there has just been some data received, because
    this suggests that painting may continue.

    A page may repeatedly request resources in an infinite loop; a timeout
    should be placed in any measurement that uses this metric, e.g.:
      def IsDone():
        return self._speedindex.IsFinished(tab)
      util.WaitFor(IsDone, 60)

    Returns:
      True if 2 seconds have passed since last resource received, false
      otherwise.
    """
    if self._is_finished:
      return True

    # The script that provides the function window.timeSinceLastResponseMs()
    # needs to be loaded for this function, but it must be loaded AFTER
    # the Start method is called, because if the Start method is called in
    # the PageMeasurement's WillNavigateToPage function, then it will
    # not be available here. The script should only be re-loaded once per page
    # so that variables in the script get reset only for a new page.
    if not self._script_is_loaded:
      tab.ExecuteJavaScript(self._js)
      self._script_is_loaded = True

    time_since_last_response_ms = tab.EvaluateJavaScript(
        "window.timeSinceLastResponseAfterLoadMs()")
    self._is_finished = time_since_last_response_ms > 2000
    return self._is_finished
class SpeedIndexImpl(object):
  """Base strategy for computing a page's Speed Index.

  Subclasses provide GetTimeCompletenessList(); this class turns that
  visual-progress curve into the final Speed Index number.
  """

  def __init__(self, tab):
    """Constructor.

    Args:
      tab: The telemetry.core.Tab object for which to calculate SpeedIndex.
    """
    self.tab = tab

  def Start(self):
    raise NotImplementedError()

  def Stop(self):
    raise NotImplementedError()

  def GetTimeCompletenessList(self):
    """Returns a list of time to visual completeness tuples.

    In the WPT PHP implementation, this is also called 'visual progress'.
    """
    raise NotImplementedError()

  def CalculateSpeedIndex(self):
    """Calculate the speed index.

    The speed index number conceptually represents the number of milliseconds
    that the page was "visually incomplete". If the page were 0% complete for
    1000 ms, then the score would be 1000; if it were 0% complete for 100 ms
    then 90% complete (ie 10% incomplete) for 900 ms, then the score would be
    1.0*100 + 0.1*900 = 190.

    Returns:
      A single number, milliseconds of visual incompleteness.
    """
    samples = self.GetTimeCompletenessList()
    # Integrate the "visually incomplete" fraction over time: each sample
    # closes a strip whose width is the elapsed time since the previous
    # sample and whose height is the previous incompleteness.
    index = 0.0
    last_time = samples[0][0]
    last_completeness = 0.0
    for current_time, completeness in samples:
      index += (current_time - last_time) * (1.0 - last_completeness)
      last_time, last_completeness = current_time, completeness
    return index
class VideoSpeedIndexImpl(SpeedIndexImpl):
  """Computes visual progress from color histograms of captured video frames."""

  def __init__(self, tab):
    super(VideoSpeedIndexImpl, self).__init__(tab)
    assert self.tab.video_capture_supported
    # Populated by Stop(); read by GetTimeCompletenessList().
    self._time_completeness_list = None

  def Start(self):
    # TODO(tonyg): Bitrate is arbitrary here. Experiment with screen capture
    # overhead vs. speed index accuracy and set the bitrate appropriately.
    self.tab.StartVideoCapture(min_bitrate_mbps=4)

  def Stop(self):
    histograms = [(time, bitmap.ColorHistogram())
                  for time, bitmap in self.tab.StopVideoCapture()]
    # Progress is measured as movement of each frame's histogram away from
    # the first frame's histogram and toward the final frame's.
    start_histogram = histograms[0][1]
    final_histogram = histograms[-1][1]

    def Difference(hist1, hist2):
      # Per-bucket absolute difference between two histograms (generator).
      return (abs(a - b) for a, b in zip(hist1, hist2))

    full_difference = list(Difference(start_histogram, final_histogram))
    total = float(sum(full_difference))

    def FrameProgress(histogram):
      difference = Difference(start_histogram, histogram)
      # Each color bucket is capped at the full difference, so that progress
      # does not exceed 100%.
      return sum(min(a, b) for a, b in zip(difference, full_difference))

    self._time_completeness_list = [(time, FrameProgress(hist) / total)
                                    for time, hist in histograms]

  def GetTimeCompletenessList(self):
    assert self._time_completeness_list, 'Must call Stop() first.'
    return self._time_completeness_list
class PaintRectSpeedIndexImpl(SpeedIndexImpl):
  """Approximates visual progress from the areas of timeline 'Paint' events.

  Each paint rectangle contributes "adjusted area" points, and completeness
  at a given time is the cumulative points divided by the total points.
  """

  def __init__(self, tab):
    super(PaintRectSpeedIndexImpl, self).__init__(tab)

  def Start(self):
    self.tab.StartTimelineRecording()

  def Stop(self):
    self.tab.StopTimelineRecording()

  def GetTimeCompletenessList(self):
    events = self.tab.timeline_model.GetAllEvents()
    viewport = self._GetViewportSize()
    paint_events = self._IncludedPaintEvents(events)
    time_area_dict = self._TimeAreaDict(paint_events, viewport)
    total_area = sum(time_area_dict.values())
    assert total_area > 0.0, 'Total paint event area must be greater than 0.'
    completeness = 0.0
    time_completeness_list = []
    # TODO(tonyg): This sets the start time to the start of the first paint
    # event. That can't be correct. The start time should be navigationStart.
    # Since the previous screen is not cleared at navigationStart, we should
    # probably assume the completeness is 0 until the first paint and add the
    # time of navigationStart as the start. We need to confirm what WPT does.
    # Reuse the event list fetched above instead of querying the timeline
    # model for all events a second time.
    time_completeness_list.append((events[0].start, completeness))
    for time, area in sorted(time_area_dict.items()):
      completeness += float(area) / total_area
      # Visual progress is rounded to the nearest percentage point as in WPT.
      time_completeness_list.append((time, round(completeness, 2)))
    return time_completeness_list

  def _GetViewportSize(self):
    """Returns dimensions of the viewport as [width, height]."""
    return self.tab.EvaluateJavaScript(
        '[ window.innerWidth, window.innerHeight ]')

  def _IncludedPaintEvents(self, events):
    """Get all events that are counted in the calculation of the speed index.

    There's one category of paint event that's filtered out: paint events
    that occur before the first 'ResourceReceiveResponse' and 'Layout' events.

    Previously in the WPT speed index, paint events that contain children
    paint events were also filtered out.
    """
    def FirstLayoutTime(events):
      """Get the start time of the first layout after a resource received."""
      has_received_response = False
      for event in events:
        if event.name == 'ResourceReceiveResponse':
          has_received_response = True
        elif has_received_response and event.name == 'Layout':
          return event.start
      assert False, 'There were no layout events after resource receive events.'

    first_layout_time = FirstLayoutTime(events)
    paint_events = [e for e in events
                    if e.start >= first_layout_time and e.name == 'Paint']
    return paint_events

  def _TimeAreaDict(self, paint_events, viewport):
    """Make a dict from time to adjusted area value for events at that time.

    The adjusted area value of each paint event is determined by how many
    paint events cover the same rectangle, and whether it's a full-window
    paint event. "Adjusted area" can also be thought of as "points" of visual
    completeness -- each rectangle has a certain number of points and these
    points are distributed amongst the paint events that paint that rectangle.

    Args:
      paint_events: A list of paint events
      viewport: A tuple (width, height) of the window.

    Returns:
      A dictionary of times of each paint event (in milliseconds) to the
      adjusted area that the paint event is worth.
    """
    width, height = viewport
    fullscreen_area = width * height

    def ClippedArea(rectangle):
      """Returns rectangle area clipped to viewport size."""
      _, x0, y0, x1, y1 = rectangle
      clipped_width = max(0, min(width, x1) - max(0, x0))
      clipped_height = max(0, min(height, y1) - max(0, y0))
      return clipped_width * clipped_height

    grouped = self._GroupEventByRectangle(paint_events)
    event_area_dict = collections.defaultdict(int)
    for rectangle, events in grouped.items():
      # The area points for each rectangle are divided up among the paint
      # events in that rectangle.
      area = ClippedArea(rectangle)
      update_count = len(events)
      adjusted_area = float(area) / update_count
      # Paint events for the largest-area rectangle are counted as 50%.
      if area == fullscreen_area:
        adjusted_area /= 2
      for event in events:
        # The end time for an event is used for that event's time.
        event_time = event.end
        event_area_dict[event_time] += adjusted_area
    return event_area_dict

  def _GetRectangle(self, paint_event):
    """Get the specific rectangle on the screen for a paint event.

    Each paint event belongs to a frame (as in html <frame> or <iframe>).
    This, together with location and dimensions, comprises a rectangle.
    In the WPT source, this 'rectangle' is also called a 'region'.
    """
    def GetBox(quad):
      """Gets top-left and bottom-right coordinates from paint event.

      In the timeline data from devtools, paint rectangle dimensions are
      represented x-y coordinates of four corners, clockwise from the
      top-left.
      See: function WebInspector.TimelinePresentationModel.quadFromRectData
      in file src/out/Debug/obj/gen/devtools/TimelinePanel.js.
      """
      x0, y0, _, _, x1, y1, _, _ = quad
      return (x0, y0, x1, y1)

    assert paint_event.name == 'Paint'
    frame = paint_event.args['frameId']
    return (frame,) + GetBox(paint_event.args['data']['clip'])

  def _GroupEventByRectangle(self, paint_events):
    """Group all paint events according to the rectangle that they update."""
    result = collections.defaultdict(list)
    for event in paint_events:
      assert event.name == 'Paint'
      result[self._GetRectangle(event)].append(event)
    return result
| bsd-3-clause |
Julian/home-assistant | homeassistant/helpers/config_validation.py | 2 | 11624 | """Helpers for config validation using voluptuous."""
from datetime import timedelta
import jinja2
import voluptuous as vol
from homeassistant.loader import get_platform
from homeassistant.const import (
CONF_PLATFORM, CONF_SCAN_INTERVAL, TEMP_CELSIUS, TEMP_FAHRENHEIT,
CONF_ALIAS, CONF_ENTITY_ID, CONF_VALUE_TEMPLATE, WEEKDAYS,
CONF_CONDITION, CONF_BELOW, CONF_ABOVE, SUN_EVENT_SUNSET,
SUN_EVENT_SUNRISE)
from homeassistant.helpers.entity import valid_entity_id
import homeassistant.util.dt as dt_util
from homeassistant.util import slugify
# pylint: disable=invalid-name
TIME_PERIOD_ERROR = "offset {} should be format 'HH:MM' or 'HH:MM:SS'"

# Home Assistant types
# Basic coercing validators shared by many platform schemas.
byte = vol.All(vol.Coerce(int), vol.Range(min=0, max=255))
small_float = vol.All(vol.Coerce(float), vol.Range(min=0, max=1))
positive_int = vol.All(vol.Coerce(int), vol.Range(min=0))
latitude = vol.All(vol.Coerce(float), vol.Range(min=-90, max=90),
                   msg='invalid latitude')
longitude = vol.All(vol.Coerce(float), vol.Range(min=-180, max=180),
                    msg='invalid longitude')
# Accepts 'sunset'/'sunrise' in any case and normalizes to lowercase.
sun_event = vol.All(vol.Lower, vol.Any(SUN_EVENT_SUNSET, SUN_EVENT_SUNRISE))
# Adapted from:
# https://github.com/alecthomas/voluptuous/issues/115#issuecomment-144464666
def has_at_least_one_key(*keys):
    """Validator that at least one key exists."""
    def validate(obj):
        """Test keys exist in dict."""
        if not isinstance(obj, dict):
            raise vol.Invalid('expected dictionary')
        if any(key in obj for key in keys):
            return obj
        raise vol.Invalid('must contain one of {}.'.format(', '.join(keys)))
    return validate
def boolean(value):
    """Validate and coerce a boolean value."""
    if not isinstance(value, str):
        # Non-string input falls through to plain truthiness.
        return bool(value)
    normalized = value.lower()
    if normalized in ('1', 'true', 'yes', 'on', 'enable'):
        return True
    if normalized in ('0', 'false', 'no', 'off', 'disable'):
        return False
    raise vol.Invalid('invalid boolean value {}'.format(normalized))
def isfile(value):
    """Validate that the value is an existing file."""
    # Delegate to voluptuous' IsFile with a friendlier error message.
    return vol.IsFile('not a file')(value)
def ensure_list(value):
    """Wrap value in list if it is not one."""
    if isinstance(value, list):
        return value
    return [value]
def entity_id(value):
    """Validate Entity ID."""
    candidate = string(value).lower()
    if not valid_entity_id(candidate):
        raise vol.Invalid(
            'Entity ID {} is an invalid entity id'.format(candidate))
    return candidate
def entity_ids(value):
    """Validate Entity IDs."""
    if value is None:
        raise vol.Invalid('Entity IDs can not be None')
    if isinstance(value, str):
        # A comma-separated string is split into individual IDs.
        value = [part.strip() for part in value.split(',')]
    return [entity_id(part) for part in value]
def icon(value):
    """Validate icon."""
    name = str(value)
    if not name.startswith('mdi:'):
        raise vol.Invalid('Icons should start with prefix "mdi:"')
    return name
# Validator turning e.g. {'hours': 1, 'minutes': 30} into a timedelta.
# At least one of the time-unit keys must be present.
time_period_dict = vol.All(
    dict, vol.Schema({
        'days': vol.Coerce(int),
        'hours': vol.Coerce(int),
        'minutes': vol.Coerce(int),
        'seconds': vol.Coerce(int),
        'milliseconds': vol.Coerce(int),
    }),
    has_at_least_one_key('days', 'hours', 'minutes',
                         'seconds', 'milliseconds'),
    lambda value: timedelta(**value))
def time_period_str(value):
    """Validate and transform time offset."""
    if isinstance(value, int):
        raise vol.Invalid('Make sure you wrap time values in quotes')
    elif not isinstance(value, str):
        raise vol.Invalid(TIME_PERIOD_ERROR.format(value))

    # An optional leading sign selects the direction of the offset.
    sign = 1
    if value.startswith('-'):
        sign = -1
        value = value[1:]
    elif value.startswith('+'):
        value = value[1:]

    try:
        fields = [int(field) for field in value.split(':')]
    except ValueError:
        raise vol.Invalid(TIME_PERIOD_ERROR.format(value))

    if len(fields) == 2:
        hour, minute = fields
        second = 0
    elif len(fields) == 3:
        hour, minute, second = fields
    else:
        raise vol.Invalid(TIME_PERIOD_ERROR.format(value))

    return sign * timedelta(hours=hour, minutes=minute, seconds=second)
# Accepts a signed 'HH:MM[:SS]' string, a timedelta, or a time-period dict.
time_period = vol.Any(time_period_str, timedelta, time_period_dict)
def log_exception(logger, ex, domain, config):
    """Log a user-friendly error for a config validation exception.

    Args:
        logger: Logger to emit the error on.
        ex: A voluptuous Invalid error (has ``error_message`` and ``path``).
        domain: Config domain the error belongs to, e.g. ``light``.
        config: The offending config object; YAML-loaded nodes may carry
            ``__line__``/``__config_file__`` markers for better messages.
    """
    message = 'Invalid config for [{}]: '.format(domain)
    if 'extra keys not allowed' in ex.error_message:
        message += '[{}] is an invalid option for [{}]. Check: {}->{}.'\
            .format(ex.path[-1], domain, domain,
                    '->'.join('%s' % m for m in ex.path))
    else:
        message += str(ex)
    if hasattr(config, '__line__'):
        # Guard the file name lookup too: an object may carry a line marker
        # without a config-file attribute, which previously raised
        # AttributeError here.
        message += " (See {}:{})".format(
            getattr(config, '__config_file__', '?'), config.__line__ or '?')
    logger.error(message)
def match_all(value):
    """Validator that matches all values."""
    # Identity validator, useful as a wildcard key in schemas.
    return value
def platform_validator(domain):
    """Validate if platform exists for given domain."""
    def validator(value):
        """Test if platform exists."""
        if value is None:
            raise vol.Invalid('platform cannot be None')
        if not get_platform(domain, str(value)):
            raise vol.Invalid(
                'platform {} does not exist for {}'.format(value, domain))
        return value
    return validator
def positive_timedelta(value):
    """Validate timedelta is positive."""
    if value >= timedelta(0):
        return value
    raise vol.Invalid('Time period should be positive')
def service(value):
    """Validate service."""
    # Services use same format as entities so we can use same helper.
    if not valid_entity_id(value):
        raise vol.Invalid('Service {} does not match format <domain>.<name>'
                          .format(value))
    return value
def slug(value):
    """Validate value is a valid slug."""
    if value is None:
        raise vol.Invalid('Slug should not be None')
    text = str(value)
    slugified = slugify(text)
    if text != slugified:
        raise vol.Invalid('invalid slug {} (try {})'.format(text, slugified))
    return text
def string(value):
    """Coerce value to string, except for None."""
    if value is None:
        raise vol.Invalid('string value is None')
    return str(value)
def temperature_unit(value):
    """Validate and transform temperature unit."""
    unit = str(value).upper()
    if unit == 'C':
        return TEMP_CELSIUS
    if unit == 'F':
        return TEMP_FAHRENHEIT
    raise vol.Invalid('invalid temperature unit (expected C or F)')
def template(value):
    """Validate a jinja2 template."""
    if value is None:
        raise vol.Invalid('template value is None')
    value = str(value)
    try:
        # Parsing is enough to surface syntax errors without rendering.
        jinja2.Environment().parse(value)
    except jinja2.exceptions.TemplateSyntaxError as ex:
        raise vol.Invalid('invalid template ({})'.format(ex))
    return value
def time(value):
    """Validate time."""
    parsed = dt_util.parse_time(value)
    if parsed is None:
        raise vol.Invalid('Invalid time specified: {}'.format(value))
    return parsed
def time_zone(value):
    """Validate timezone."""
    if dt_util.get_time_zone(value) is None:
        raise vol.Invalid(
            'Invalid time zone passed in. Valid options can be found here: '
            'http://en.wikipedia.org/wiki/List_of_tz_database_time_zones')
    return value
# Accepts a single weekday abbreviation or a list of them (see WEEKDAYS).
weekdays = vol.All(ensure_list, [vol.In(WEEKDAYS)])
# Validator helpers
def key_dependency(key, dependency):
    """Validate that all dependencies exist for key."""
    def validator(value):
        """Test dependencies."""
        if not isinstance(value, dict):
            raise vol.Invalid('key dependencies require a dict')
        # The dependency is only required when the key itself is present.
        if key not in value or dependency in value:
            return value
        raise vol.Invalid('dependency violation - key "{}" requires '
                          'key "{}" to exist'.format(key, dependency))
    return validator
# Schemas

# Base schema every platform config must satisfy; extra keys are allowed so
# platforms can extend it.
PLATFORM_SCHEMA = vol.Schema({
    vol.Required(CONF_PLATFORM): string,
    CONF_SCAN_INTERVAL: vol.All(vol.Coerce(int), vol.Range(min=1)),
}, extra=vol.ALLOW_EXTRA)

# Script step that fires an event on the bus.
EVENT_SCHEMA = vol.Schema({
    vol.Optional(CONF_ALIAS): string,
    vol.Required('event'): string,
    vol.Optional('event_data'): dict,
})

# Script step that calls a service; exactly one of 'service' or
# 'service_template' must be provided.
SERVICE_SCHEMA = vol.All(vol.Schema({
    vol.Optional(CONF_ALIAS): string,
    vol.Exclusive('service', 'service name'): service,
    vol.Exclusive('service_template', 'service name'): template,
    vol.Optional('data'): dict,
    vol.Optional('data_template'): {match_all: template},
    vol.Optional(CONF_ENTITY_ID): entity_ids,
}), has_at_least_one_key('service', 'service_template'))

# Condition schemas, one per condition type.
NUMERIC_STATE_CONDITION_SCHEMA = vol.All(vol.Schema({
    vol.Required(CONF_CONDITION): 'numeric_state',
    vol.Required(CONF_ENTITY_ID): entity_id,
    CONF_BELOW: vol.Coerce(float),
    CONF_ABOVE: vol.Coerce(float),
    vol.Optional(CONF_VALUE_TEMPLATE): template,
}), has_at_least_one_key(CONF_BELOW, CONF_ABOVE))

STATE_CONDITION_SCHEMA = vol.All(vol.Schema({
    vol.Required(CONF_CONDITION): 'state',
    vol.Required(CONF_ENTITY_ID): entity_id,
    vol.Required('state'): str,
    vol.Optional('for'): vol.All(time_period, positive_timedelta),
    # To support use_trigger_value in automation
    # Deprecated 2016/04/25
    vol.Optional('from'): str,
}), key_dependency('for', 'state'))

SUN_CONDITION_SCHEMA = vol.All(vol.Schema({
    vol.Required(CONF_CONDITION): 'sun',
    vol.Optional('before'): sun_event,
    vol.Optional('before_offset'): time_period,
    vol.Optional('after'): vol.All(vol.Lower, vol.Any('sunset', 'sunrise')),
    vol.Optional('after_offset'): time_period,
}), has_at_least_one_key('before', 'after'))

TEMPLATE_CONDITION_SCHEMA = vol.Schema({
    vol.Required(CONF_CONDITION): 'template',
    vol.Required(CONF_VALUE_TEMPLATE): template,
})

TIME_CONDITION_SCHEMA = vol.All(vol.Schema({
    vol.Required(CONF_CONDITION): 'time',
    'before': time,
    'after': time,
    'weekday': weekdays,
}), has_at_least_one_key('before', 'after', 'weekday'))

ZONE_CONDITION_SCHEMA = vol.Schema({
    vol.Required(CONF_CONDITION): 'zone',
    vol.Required(CONF_ENTITY_ID): entity_id,
    'zone': entity_id,
    # To support use_trigger_value in automation
    # Deprecated 2016/04/25
    vol.Optional('event'): vol.Any('enter', 'leave'),
})

# AND/OR nest conditions recursively; the lambda defers the lookup of
# CONDITION_SCHEMA, which is defined below.
AND_CONDITION_SCHEMA = vol.Schema({
    vol.Required(CONF_CONDITION): 'and',
    vol.Required('conditions'): vol.All(
        ensure_list,
        # pylint: disable=unnecessary-lambda
        [lambda value: CONDITION_SCHEMA(value)],
    )
})

OR_CONDITION_SCHEMA = vol.Schema({
    vol.Required(CONF_CONDITION): 'or',
    vol.Required('conditions'): vol.All(
        ensure_list,
        # pylint: disable=unnecessary-lambda
        [lambda value: CONDITION_SCHEMA(value)],
    )
})

CONDITION_SCHEMA = vol.Any(
    NUMERIC_STATE_CONDITION_SCHEMA,
    STATE_CONDITION_SCHEMA,
    SUN_CONDITION_SCHEMA,
    TEMPLATE_CONDITION_SCHEMA,
    TIME_CONDITION_SCHEMA,
    ZONE_CONDITION_SCHEMA,
    AND_CONDITION_SCHEMA,
    OR_CONDITION_SCHEMA,
)

# Script step that pauses execution for a fixed or templated period.
_SCRIPT_DELAY_SCHEMA = vol.Schema({
    vol.Optional(CONF_ALIAS): string,
    vol.Required("delay"): vol.Any(
        vol.All(time_period, positive_timedelta),
        template)
})

# A script is a list of service/delay/event/condition steps.
SCRIPT_SCHEMA = vol.All(
    ensure_list,
    [vol.Any(SERVICE_SCHEMA, _SCRIPT_DELAY_SCHEMA, EVENT_SCHEMA,
             CONDITION_SCHEMA)],
)
| mit |
liangazhou/django-rdp | packages/Django-1.8.6/tests/migrations/test_commands.py | 5 | 39966 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import codecs
import importlib
import os
import shutil
from django.apps import apps
from django.core.management import CommandError, call_command
from django.db import DatabaseError, connection, models
from django.db.migrations import questioner
from django.db.migrations.recorder import MigrationRecorder
from django.test import ignore_warnings, mock, override_settings
from django.utils import six
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_text
from .models import UnicodeModel, UnserializableModel
from .test_base import MigrationTestBase
class MigrateTests(MigrationTestBase):
    """
    Tests running the migrate command.

    Each test points MIGRATION_MODULES at a fixture migration package and
    checks the resulting database tables and/or command output.
    """

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
    def test_migrate(self):
        """
        Tests basic usage of the migrate command.
        """
        # Make sure no tables are created
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
        self.assertTableNotExists("migrations_book")
        # Run the migrations to 0001 only
        call_command("migrate", "migrations", "0001", verbosity=0)
        # Make sure the right tables exist
        self.assertTableExists("migrations_author")
        self.assertTableExists("migrations_tribble")
        self.assertTableNotExists("migrations_book")
        # Run migrations all the way
        call_command("migrate", verbosity=0)
        # Make sure the right tables exist
        self.assertTableExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
        self.assertTableExists("migrations_book")
        # Unmigrate everything
        call_command("migrate", "migrations", "zero", verbosity=0)
        # Make sure it's all gone
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
        self.assertTableNotExists("migrations_book")

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
    def test_migrate_fake_initial(self):
        """
        #24184 - Tests that --fake-initial only works if all tables created in
        the initial migration of an app exists
        """
        # Make sure no tables are created
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
        # Run the migrations to 0001 only
        call_command("migrate", "migrations", "0001", verbosity=0)
        # Make sure the right tables exist
        self.assertTableExists("migrations_author")
        self.assertTableExists("migrations_tribble")
        # Fake a roll-back
        call_command("migrate", "migrations", "zero", fake=True, verbosity=0)
        # Make sure the tables still exist
        self.assertTableExists("migrations_author")
        self.assertTableExists("migrations_tribble")
        # Try to run initial migration
        with self.assertRaises(DatabaseError):
            call_command("migrate", "migrations", "0001", verbosity=0)
        # Run initial migration with an explicit --fake-initial
        out = six.StringIO()
        with mock.patch('django.core.management.color.supports_color', lambda *args: False):
            call_command("migrate", "migrations", "0001", fake_initial=True, stdout=out, verbosity=1)
        self.assertIn(
            "migrations.0001_initial... faked",
            out.getvalue().lower()
        )
        # Run migrations all the way
        call_command("migrate", verbosity=0)
        # Make sure the right tables exist
        self.assertTableExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
        self.assertTableExists("migrations_book")
        # Fake a roll-back
        call_command("migrate", "migrations", "zero", fake=True, verbosity=0)
        # Make sure the tables still exist
        self.assertTableExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
        self.assertTableExists("migrations_book")
        # Try to run initial migration
        with self.assertRaises(DatabaseError):
            call_command("migrate", "migrations", verbosity=0)
        # Run initial migration with an explicit --fake-initial
        with self.assertRaises(DatabaseError):
            # Fails because "migrations_tribble" does not exist but needs to in
            # order to make --fake-initial work.
            call_command("migrate", "migrations", fake_initial=True, verbosity=0)
        # Fake an apply
        call_command("migrate", "migrations", fake=True, verbosity=0)
        # Unmigrate everything
        call_command("migrate", "migrations", "zero", verbosity=0)
        # Make sure it's all gone
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
        self.assertTableNotExists("migrations_book")

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_conflict"})
    def test_migrate_conflict_exit(self):
        """
        Makes sure that migrate exits if it detects a conflict.
        """
        with self.assertRaisesMessage(CommandError, "Conflicting migrations detected"):
            call_command("migrate", "migrations")

    @ignore_warnings(category=RemovedInDjango110Warning)
    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
    def test_migrate_list(self):
        """
        Tests --list output of migrate command
        """
        out = six.StringIO()
        # Colorized output: the app label is wrapped in bold escape codes.
        with mock.patch('django.core.management.color.supports_color', lambda *args: True):
            call_command("migrate", list=True, stdout=out, verbosity=0, no_color=False)
        self.assertEqual(
            '\x1b[1mmigrations\n\x1b[0m'
            '  [ ] 0001_initial\n'
            '  [ ] 0002_second\n',
            out.getvalue().lower()
        )
        call_command("migrate", "migrations", "0001", verbosity=0)
        out = six.StringIO()
        # Giving the explicit app_label tests for selective `show_migration_list` in the command
        call_command("migrate", "migrations", list=True, stdout=out, verbosity=0, no_color=True)
        self.assertEqual(
            'migrations\n'
            '  [x] 0001_initial\n'
            '  [ ] 0002_second\n',
            out.getvalue().lower()
        )
        # Cleanup by unmigrating everything
        call_command("migrate", "migrations", "zero", verbosity=0)

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
    def test_showmigrations_list(self):
        """
        Tests --list output of showmigrations command
        """
        out = six.StringIO()
        with mock.patch('django.core.management.color.supports_color', lambda *args: True):
            call_command("showmigrations", format='list', stdout=out, verbosity=0, no_color=False)
        self.assertEqual(
            '\x1b[1mmigrations\n\x1b[0m'
            '  [ ] 0001_initial\n'
            '  [ ] 0002_second\n',
            out.getvalue().lower()
        )
        call_command("migrate", "migrations", "0001", verbosity=0)
        out = six.StringIO()
        # Giving the explicit app_label tests for selective `show_list` in the command
        call_command("showmigrations", "migrations", format='list', stdout=out, verbosity=0, no_color=True)
        self.assertEqual(
            'migrations\n'
            '  [x] 0001_initial\n'
            '  [ ] 0002_second\n',
            out.getvalue().lower()
        )
        # Cleanup by unmigrating everything
        call_command("migrate", "migrations", "zero", verbosity=0)

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_run_before"})
    def test_showmigrations_plan(self):
        """
        Tests --plan output of showmigrations command
        """
        out = six.StringIO()
        call_command("showmigrations", format='plan', stdout=out)
        self.assertIn(
            "[ ]  migrations.0001_initial\n"
            "[ ]  migrations.0003_third\n"
            "[ ]  migrations.0002_second",
            out.getvalue().lower()
        )
        out = six.StringIO()
        # Verbosity 2 also shows each migration's dependencies.
        call_command("showmigrations", format='plan', stdout=out, verbosity=2)
        self.assertIn(
            "[ ]  migrations.0001_initial\n"
            "[ ]  migrations.0003_third ... (migrations.0001_initial)\n"
            "[ ]  migrations.0002_second ... (migrations.0001_initial)",
            out.getvalue().lower()
        )
        call_command("migrate", "migrations", "0003", verbosity=0)
        out = six.StringIO()
        call_command("showmigrations", format='plan', stdout=out)
        self.assertIn(
            "[x]  migrations.0001_initial\n"
            "[x]  migrations.0003_third\n"
            "[ ]  migrations.0002_second",
            out.getvalue().lower()
        )
        out = six.StringIO()
        call_command("showmigrations", format='plan', stdout=out, verbosity=2)
        self.assertIn(
            "[x]  migrations.0001_initial\n"
            "[x]  migrations.0003_third ... (migrations.0001_initial)\n"
            "[ ]  migrations.0002_second ... (migrations.0001_initial)",
            out.getvalue().lower()
        )
        # Cleanup by unmigrating everything
        call_command("migrate", "migrations", "zero", verbosity=0)

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_empty"})
    def test_showmigrations_plan_no_migrations(self):
        """
        Tests --plan output of showmigrations command without migrations
        """
        out = six.StringIO()
        call_command("showmigrations", format='plan', stdout=out)
        self.assertEqual("", out.getvalue().lower())
        out = six.StringIO()
        call_command("showmigrations", format='plan', stdout=out, verbosity=2)
        self.assertEqual("", out.getvalue().lower())

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed_complex"})
    def test_showmigrations_plan_squashed(self):
        """
        Tests --plan output of showmigrations command with squashed migrations.
        """
        out = six.StringIO()
        call_command("showmigrations", format='plan', stdout=out)
        self.assertEqual(
            "[ ]  migrations.1_auto\n"
            "[ ]  migrations.2_auto\n"
            "[ ]  migrations.3_squashed_5\n"
            "[ ]  migrations.6_auto\n"
            "[ ]  migrations.7_auto\n",
            out.getvalue().lower()
        )
        out = six.StringIO()
        call_command("showmigrations", format='plan', stdout=out, verbosity=2)
        self.assertEqual(
            "[ ]  migrations.1_auto\n"
            "[ ]  migrations.2_auto ... (migrations.1_auto)\n"
            "[ ]  migrations.3_squashed_5 ... (migrations.2_auto)\n"
            "[ ]  migrations.6_auto ... (migrations.3_squashed_5)\n"
            "[ ]  migrations.7_auto ... (migrations.6_auto)\n",
            out.getvalue().lower()
        )
        call_command("migrate", "migrations", "3_squashed_5", verbosity=0)
        out = six.StringIO()
        call_command("showmigrations", format='plan', stdout=out)
        self.assertEqual(
            "[x]  migrations.1_auto\n"
            "[x]  migrations.2_auto\n"
            "[x]  migrations.3_squashed_5\n"
            "[ ]  migrations.6_auto\n"
            "[ ]  migrations.7_auto\n",
            out.getvalue().lower()
        )
        out = six.StringIO()
        call_command("showmigrations", format='plan', stdout=out, verbosity=2)
        self.assertEqual(
            "[x]  migrations.1_auto\n"
            "[x]  migrations.2_auto ... (migrations.1_auto)\n"
            "[x]  migrations.3_squashed_5 ... (migrations.2_auto)\n"
            "[ ]  migrations.6_auto ... (migrations.3_squashed_5)\n"
            "[ ]  migrations.7_auto ... (migrations.6_auto)\n",
            out.getvalue().lower()
        )

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
    def test_sqlmigrate(self):
        """
        Makes sure that sqlmigrate does something.
        """
        # Make sure the output is wrapped in a transaction
        out = six.StringIO()
        call_command("sqlmigrate", "migrations", "0001", stdout=out)
        output = out.getvalue()
        self.assertIn(connection.ops.start_transaction_sql(), output)
        self.assertIn(connection.ops.end_transaction_sql(), output)
        # Test forwards. All the databases agree on CREATE TABLE, at least.
        out = six.StringIO()
        call_command("sqlmigrate", "migrations", "0001", stdout=out)
        self.assertIn("create table", out.getvalue().lower())
        # Cannot generate the reverse SQL unless we've applied the migration.
        call_command("migrate", "migrations", verbosity=0)
        # And backwards is a DROP TABLE
        out = six.StringIO()
        call_command("sqlmigrate", "migrations", "0001", stdout=out, backwards=True)
        self.assertIn("drop table", out.getvalue().lower())
        # Cleanup by unmigrating everything
        call_command("migrate", "migrations", "zero", verbosity=0)

    @override_settings(
        INSTALLED_APPS=[
            "migrations.migrations_test_apps.migrated_app",
            "migrations.migrations_test_apps.migrated_unapplied_app",
            "migrations.migrations_test_apps.unmigrated_app"])
    def test_regression_22823_unmigrated_fk_to_migrated_model(self):
        """
        https://code.djangoproject.com/ticket/22823

        Assuming you have 3 apps, `A`, `B`, and `C`, such that:

        * `A` has migrations
        * `B` has a migration we want to apply
        * `C` has no migrations, but has an FK to `A`

        When we try to migrate "B", an exception occurs because the
        "B" was not included in the ProjectState that is used to detect
        soft-applied migrations.
        """
        call_command("migrate", "migrated_unapplied_app", stdout=six.StringIO())

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
    def test_migrate_record_replaced(self):
        """
        Running a single squashed migration should record all of the original
        replaced migrations as run.
        """
        recorder = MigrationRecorder(connection)
        out = six.StringIO()
        call_command("migrate", "migrations", verbosity=0)
        call_command("showmigrations", "migrations", stdout=out, no_color=True)
        self.assertEqual(
            'migrations\n'
            ' [x] 0001_squashed_0002 (2 squashed migrations)\n',
            out.getvalue().lower()
        )
        applied_migrations = recorder.applied_migrations()
        self.assertIn(("migrations", "0001_initial"), applied_migrations)
        self.assertIn(("migrations", "0002_second"), applied_migrations)
        self.assertIn(("migrations", "0001_squashed_0002"), applied_migrations)
        # Rollback changes
        call_command("migrate", "migrations", "zero", verbosity=0)

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
    def test_migrate_record_squashed(self):
        """
        Running migrate for a squashed migration should record as run
        if all of the replaced migrations have been run (#25231).
        """
        recorder = MigrationRecorder(connection)
        # Pretend the replaced migrations were already applied.
        recorder.record_applied("migrations", "0001_initial")
        recorder.record_applied("migrations", "0002_second")
        out = six.StringIO()
        call_command("migrate", "migrations", verbosity=0)
        call_command("showmigrations", "migrations", stdout=out, no_color=True)
        self.assertEqual(
            'migrations\n'
            ' [x] 0001_squashed_0002 (2 squashed migrations)\n',
            out.getvalue().lower()
        )
        self.assertIn(
            ("migrations", "0001_squashed_0002"),
            recorder.applied_migrations()
        )
        # No changes were actually applied so there is nothing to rollback
class MakeMigrationsTests(MigrationTestBase):
"""
Tests running the makemigrations command.
"""
# Because the `import_module` performed in `MigrationLoader` will cache
# the migrations package, we can't reuse the same migration package
# between tests. This is only a problem for testing, since `makemigrations`
# is normally called in its own process.
creation_counter = 0
def setUp(self):
MakeMigrationsTests.creation_counter += 1
self.migration_dir = os.path.join(self.test_dir, 'migrations_%d' % self.creation_counter)
self.migration_pkg = "migrations.migrations_%d" % self.creation_counter
self._old_models = apps.app_configs['migrations'].models.copy()
def tearDown(self):
apps.app_configs['migrations'].models = self._old_models
apps.all_models['migrations'] = self._old_models
apps.clear_cache()
_cwd = os.getcwd()
os.chdir(self.test_dir)
try:
try:
self._rmrf(self.migration_dir)
except OSError:
pass
try:
self._rmrf(os.path.join(self.test_dir,
"test_migrations_path_doesnt_exist"))
except OSError:
pass
finally:
os.chdir(_cwd)
def _rmrf(self, dname):
if os.path.commonprefix([self.test_dir, os.path.abspath(dname)]) != self.test_dir:
return
shutil.rmtree(dname)
def test_files_content(self):
self.assertTableNotExists("migrations_unicodemodel")
apps.register_model('migrations', UnicodeModel)
with override_settings(MIGRATION_MODULES={"migrations": self.migration_pkg}):
call_command("makemigrations", "migrations", verbosity=0)
init_file = os.path.join(self.migration_dir, "__init__.py")
# Check for existing __init__.py file in migrations folder
self.assertTrue(os.path.exists(init_file))
with open(init_file, 'r') as fp:
content = force_text(fp.read())
self.assertEqual(content, '')
initial_file = os.path.join(self.migration_dir, "0001_initial.py")
# Check for existing 0001_initial.py file in migration folder
self.assertTrue(os.path.exists(initial_file))
with codecs.open(initial_file, 'r', encoding='utf-8') as fp:
content = fp.read()
self.assertIn('# -*- coding: utf-8 -*-', content)
self.assertIn('migrations.CreateModel', content)
if six.PY3:
self.assertIn('úñí©óðé µóðéø', content) # Meta.verbose_name
self.assertIn('úñí©óðé µóðéøß', content) # Meta.verbose_name_plural
self.assertIn('ÚÑÍ¢ÓÐÉ', content) # title.verbose_name
self.assertIn('“Ðjáñgó”', content) # title.default
else:
self.assertIn('\\xfa\\xf1\\xed\\xa9\\xf3\\xf0\\xe9 \\xb5\\xf3\\xf0\\xe9\\xf8', content) # Meta.verbose_name
self.assertIn('\\xfa\\xf1\\xed\\xa9\\xf3\\xf0\\xe9 \\xb5\\xf3\\xf0\\xe9\\xf8\\xdf', content) # Meta.verbose_name_plural
self.assertIn('\\xda\\xd1\\xcd\\xa2\\xd3\\xd0\\xc9', content) # title.verbose_name
self.assertIn('\\u201c\\xd0j\\xe1\\xf1g\\xf3\\u201d', content) # title.default
def test_failing_migration(self):
# If a migration fails to serialize, it shouldn't generate an empty file. #21280
apps.register_model('migrations', UnserializableModel)
with six.assertRaisesRegex(self, ValueError, r'Cannot serialize'):
with override_settings(MIGRATION_MODULES={"migrations": self.migration_pkg}):
call_command("makemigrations", "migrations", verbosity=0)
initial_file = os.path.join(self.migration_dir, "0001_initial.py")
self.assertFalse(os.path.exists(initial_file))
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_conflict"})
def test_makemigrations_conflict_exit(self):
"""
Makes sure that makemigrations exits if it detects a conflict.
"""
with self.assertRaises(CommandError):
call_command("makemigrations")
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_makemigrations_merge_no_conflict(self):
"""
Makes sure that makemigrations exits if in merge mode with no conflicts.
"""
out = six.StringIO()
try:
call_command("makemigrations", merge=True, stdout=out)
except CommandError:
self.fail("Makemigrations errored in merge mode with no conflicts")
self.assertIn("No conflicts detected to merge.", out.getvalue())
def test_makemigrations_no_app_sys_exit(self):
"""
Makes sure that makemigrations exits if a non-existent app is specified.
"""
err = six.StringIO()
with self.assertRaises(SystemExit):
call_command("makemigrations", "this_app_does_not_exist", stderr=err)
self.assertIn("'this_app_does_not_exist' could not be found.", err.getvalue())
def test_makemigrations_empty_no_app_specified(self):
"""
Makes sure that makemigrations exits if no app is specified with 'empty' mode.
"""
with override_settings(MIGRATION_MODULES={"migrations": self.migration_pkg}):
self.assertRaises(CommandError, call_command, "makemigrations", empty=True)
def test_makemigrations_empty_migration(self):
"""
Makes sure that makemigrations properly constructs an empty migration.
"""
with override_settings(MIGRATION_MODULES={"migrations": self.migration_pkg}):
try:
call_command("makemigrations", "migrations", empty=True, verbosity=0)
except CommandError:
self.fail("Makemigrations errored in creating empty migration for a proper app.")
initial_file = os.path.join(self.migration_dir, "0001_initial.py")
# Check for existing 0001_initial.py file in migration folder
self.assertTrue(os.path.exists(initial_file))
with codecs.open(initial_file, 'r', encoding='utf-8') as fp:
content = fp.read()
self.assertIn('# -*- coding: utf-8 -*-', content)
# Remove all whitespace to check for empty dependencies and operations
content = content.replace(' ', '')
self.assertIn('dependencies=[\n]', content)
self.assertIn('operations=[\n]', content)
def test_makemigrations_no_changes_no_apps(self):
"""
Makes sure that makemigrations exits when there are no changes and no apps are specified.
"""
out = six.StringIO()
call_command("makemigrations", stdout=out)
self.assertIn("No changes detected", out.getvalue())
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_no_changes"})
def test_makemigrations_no_changes(self):
"""
Makes sure that makemigrations exits when there are no changes to an app.
"""
out = six.StringIO()
call_command("makemigrations", "migrations", stdout=out)
self.assertIn("No changes detected in app 'migrations'", out.getvalue())
def test_makemigrations_migrations_announce(self):
"""
Makes sure that makemigrations announces the migration at the default verbosity level.
"""
out = six.StringIO()
with override_settings(MIGRATION_MODULES={"migrations": self.migration_pkg}):
call_command("makemigrations", "migrations", stdout=out)
self.assertIn("Migrations for 'migrations'", out.getvalue())
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_no_ancestor"})
def test_makemigrations_no_common_ancestor(self):
"""
Makes sure that makemigrations fails to merge migrations with no common ancestor.
"""
with self.assertRaises(ValueError) as context:
call_command("makemigrations", "migrations", merge=True)
exception_message = str(context.exception)
self.assertIn("Could not find common ancestor of", exception_message)
self.assertIn("0002_second", exception_message)
self.assertIn("0002_conflicting_second", exception_message)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_conflict"})
def test_makemigrations_interactive_reject(self):
"""
Makes sure that makemigrations enters and exits interactive mode properly.
"""
# Monkeypatch interactive questioner to auto reject
old_input = questioner.input
questioner.input = lambda _: "N"
try:
call_command("makemigrations", "migrations", merge=True, interactive=True, verbosity=0)
merge_file = os.path.join(self.test_dir, 'test_migrations_conflict', '0003_merge.py')
self.assertFalse(os.path.exists(merge_file))
except CommandError:
self.fail("Makemigrations failed while running interactive questioner")
finally:
questioner.input = old_input
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_conflict"})
def test_makemigrations_interactive_accept(self):
"""
Makes sure that makemigrations enters interactive mode and merges properly.
"""
# Monkeypatch interactive questioner to auto accept
old_input = questioner.input
questioner.input = lambda _: "y"
out = six.StringIO()
try:
call_command("makemigrations", "migrations", merge=True, interactive=True, stdout=out)
merge_file = os.path.join(self.test_dir, 'test_migrations_conflict', '0003_merge.py')
self.assertTrue(os.path.exists(merge_file))
os.remove(merge_file)
self.assertFalse(os.path.exists(merge_file))
except CommandError:
self.fail("Makemigrations failed while running interactive questioner")
finally:
questioner.input = old_input
self.assertIn("Created new merge migration", force_text(out.getvalue()))
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_conflict"})
def test_makemigrations_handle_merge(self):
"""
Makes sure that makemigrations properly merges the conflicting migrations with --noinput.
"""
out = six.StringIO()
call_command("makemigrations", "migrations", merge=True, interactive=False, stdout=out)
output = force_text(out.getvalue())
self.assertIn("Merging migrations", output)
self.assertIn("Branch 0002_second", output)
self.assertIn("Branch 0002_conflicting_second", output)
merge_file = os.path.join(self.test_dir, 'test_migrations_conflict', '0003_merge.py')
self.assertTrue(os.path.exists(merge_file))
os.remove(merge_file)
self.assertFalse(os.path.exists(merge_file))
self.assertIn("Created new merge migration", output)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_conflict"})
def test_makemigration_merge_dry_run(self):
"""
Makes sure that makemigrations respects --dry-run option when fixing
migration conflicts (#24427).
"""
out = six.StringIO()
call_command("makemigrations", "migrations", dry_run=True, merge=True, interactive=False, stdout=out)
merge_file = os.path.join(self.test_dir, '0003_merge.py')
self.assertFalse(os.path.exists(merge_file))
output = force_text(out.getvalue())
self.assertIn("Merging migrations", output)
self.assertIn("Branch 0002_second", output)
self.assertIn("Branch 0002_conflicting_second", output)
self.assertNotIn("Created new merge migration", output)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_conflict"})
def test_makemigration_merge_dry_run_verbosity_3(self):
"""
Makes sure that `makemigrations --merge --dry-run` writes the merge
migration file to stdout with `verbosity == 3` (#24427).
"""
out = six.StringIO()
call_command("makemigrations", "migrations", dry_run=True, merge=True, interactive=False,
stdout=out, verbosity=3)
merge_file = os.path.join(self.test_dir, '0003_merge.py')
self.assertFalse(os.path.exists(merge_file))
output = force_text(out.getvalue())
self.assertIn("Merging migrations", output)
self.assertIn("Branch 0002_second", output)
self.assertIn("Branch 0002_conflicting_second", output)
self.assertNotIn("Created new merge migration", output)
# Additional output caused by verbosity 3
# The complete merge migration file that would be written
self.assertIn("# -*- coding: utf-8 -*-", output)
self.assertIn("class Migration(migrations.Migration):", output)
self.assertIn("dependencies = [", output)
self.assertIn("('migrations', '0002_second')", output)
self.assertIn("('migrations', '0002_conflicting_second')", output)
self.assertIn("operations = [", output)
self.assertIn("]", output)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_no_default"})
def test_makemigrations_dry_run(self):
"""
Ticket #22676 -- `makemigrations --dry-run` should not ask for defaults.
"""
class SillyModel(models.Model):
silly_field = models.BooleanField(default=False)
silly_date = models.DateField() # Added field without a default
class Meta:
app_label = "migrations"
out = six.StringIO()
call_command("makemigrations", "migrations", dry_run=True, stdout=out)
# Output the expected changes directly, without asking for defaults
self.assertIn("Add field silly_date to sillymodel", out.getvalue())
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_no_default"})
def test_makemigrations_dry_run_verbosity_3(self):
"""
Ticket #22675 -- Allow `makemigrations --dry-run` to output the
migrations file to stdout (with verbosity == 3).
"""
class SillyModel(models.Model):
silly_field = models.BooleanField(default=False)
silly_char = models.CharField(default="")
class Meta:
app_label = "migrations"
out = six.StringIO()
call_command("makemigrations", "migrations", dry_run=True, stdout=out, verbosity=3)
# Normal --dry-run output
self.assertIn("- Add field silly_char to sillymodel", out.getvalue())
# Additional output caused by verbosity 3
# The complete migrations file that would be written
self.assertIn("# -*- coding: utf-8 -*-", out.getvalue())
self.assertIn("class Migration(migrations.Migration):", out.getvalue())
self.assertIn("dependencies = [", out.getvalue())
self.assertIn("('migrations', '0001_initial'),", out.getvalue())
self.assertIn("migrations.AddField(", out.getvalue())
self.assertIn("model_name='sillymodel',", out.getvalue())
self.assertIn("name='silly_char',", out.getvalue())
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_path_doesnt_exist.foo.bar"})
def test_makemigrations_migrations_modules_path_not_exist(self):
"""
Ticket #22682 -- Makemigrations fails when specifying custom location
for migration files (using MIGRATION_MODULES) if the custom path
doesn't already exist.
"""
class SillyModel(models.Model):
silly_field = models.BooleanField(default=False)
class Meta:
app_label = "migrations"
out = six.StringIO()
call_command("makemigrations", "migrations", stdout=out)
# Command output indicates the migration is created.
self.assertIn(" - Create model SillyModel", out.getvalue())
# Migrations file is actually created in the expected path.
self.assertTrue(os.path.isfile(os.path.join(self.test_dir,
"test_migrations_path_doesnt_exist", "foo", "bar",
"0001_initial.py")))
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_conflict"})
def test_makemigrations_interactive_by_default(self):
"""
Makes sure that the user is prompted to merge by default if there are
conflicts and merge is True. Answer negative to differentiate it from
behavior when --noinput is specified.
"""
# Monkeypatch interactive questioner to auto reject
old_input = questioner.input
questioner.input = lambda _: "N"
out = six.StringIO()
merge_file = os.path.join(self.test_dir, 'test_migrations_conflict', '0003_merge.py')
try:
call_command("makemigrations", "migrations", merge=True, stdout=out)
# This will fail if interactive is False by default
self.assertFalse(os.path.exists(merge_file))
except CommandError:
self.fail("Makemigrations failed while running interactive questioner")
finally:
questioner.input = old_input
if os.path.exists(merge_file):
os.remove(merge_file)
self.assertNotIn("Created new merge migration", out.getvalue())
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_no_changes"},
INSTALLED_APPS=[
"migrations",
"migrations.migrations_test_apps.unspecified_app_with_conflict"])
def test_makemigrations_unspecified_app_with_conflict_no_merge(self):
"""
Makes sure that makemigrations does not raise a CommandError when an
unspecified app has conflicting migrations.
"""
try:
call_command("makemigrations", "migrations", merge=False, verbosity=0)
except CommandError:
self.fail("Makemigrations fails resolving conflicts in an unspecified app")
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.migrated_app",
"migrations.migrations_test_apps.unspecified_app_with_conflict"])
def test_makemigrations_unspecified_app_with_conflict_merge(self):
"""
Makes sure that makemigrations does not create a merge for an
unspecified app even if it has conflicting migrations.
"""
# Monkeypatch interactive questioner to auto accept
old_input = questioner.input
questioner.input = lambda _: "y"
out = six.StringIO()
merge_file = os.path.join(self.test_dir,
'migrations_test_apps',
'unspecified_app_with_conflict',
'migrations',
'0003_merge.py')
try:
call_command("makemigrations", "migrated_app", merge=True, interactive=True, stdout=out)
self.assertFalse(os.path.exists(merge_file))
self.assertIn("No conflicts detected to merge.", out.getvalue())
except CommandError:
self.fail("Makemigrations fails resolving conflicts in an unspecified app")
finally:
questioner.input = old_input
if os.path.exists(merge_file):
os.remove(merge_file)
def test_makemigrations_with_custom_name(self):
"""
Makes sure that makemigrations generate a custom migration.
"""
def cmd(migration_count, migration_name, *args):
with override_settings(MIGRATION_MODULES={"migrations": self.migration_pkg}):
try:
call_command("makemigrations", "migrations", "--verbosity", "0", "--name", migration_name, *args)
except CommandError:
self.fail("Makemigrations errored in creating empty migration with custom name for a proper app.")
migration_file = os.path.join(self.migration_dir, "%s_%s.py" % (migration_count, migration_name))
# Check for existing migration file in migration folder
self.assertTrue(os.path.exists(migration_file))
with codecs.open(migration_file, "r", encoding="utf-8") as fp:
content = fp.read()
self.assertIn("# -*- coding: utf-8 -*-", content)
content = content.replace(" ", "")
return content
# generate an initial migration
migration_name_0001 = "my_initial_migration"
content = cmd("0001", migration_name_0001)
self.assertIn("dependencies=[\n]", content)
# Python 3.3+ importlib caches os.listdir() on some platforms like
# Mac OS X (#23850).
if hasattr(importlib, 'invalidate_caches'):
importlib.invalidate_caches()
# generate an empty migration
migration_name_0002 = "my_custom_migration"
content = cmd("0002", migration_name_0002, "--empty")
self.assertIn("dependencies=[\n('migrations','0001_%s'),\n]" % migration_name_0001, content)
self.assertIn("operations=[\n]", content)
def test_makemigrations_exit(self):
"""
makemigrations --exit should exit with sys.exit(1) when there are no
changes to an app.
"""
with self.settings(MIGRATION_MODULES={"migrations": self.migration_pkg}):
call_command("makemigrations", "--exit", "migrations", verbosity=0)
with self.settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_no_changes"}):
with self.assertRaises(SystemExit):
call_command("makemigrations", "--exit", "migrations", verbosity=0)
class SquashMigrationsTest(MigrationTestBase):
    """
    Tests running the squashmigrations command.
    """
    # Absolute path of the file squashmigrations is expected to write.
    path = os.path.join(MigrationTestBase.test_dir,
                        "test_migrations/0001_squashed_0002_second.py")
    def tearDown(self):
        if os.path.exists(self.path):
            os.remove(self.path)
    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
    def test_squashmigrations_squashes(self):
        """squashmigrations writes the squashed migration file."""
        call_command("squashmigrations", "migrations", "0002", interactive=False, verbosity=0)
        self.assertTrue(os.path.exists(self.path))
    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
    def test_squashmigrations_optimizes(self):
        """squashmigrations reports the operation-count reduction it achieved."""
        stdout = six.StringIO()
        call_command("squashmigrations", "migrations", "0002", interactive=False,
                     verbosity=1, stdout=stdout)
        self.assertIn("Optimized from 7 operations to 5 operations.",
                      force_text(stdout.getvalue()))
    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
    def test_ticket_23799_squashmigrations_no_optimize(self):
        """--no-optimize really skips the optimizer (#23799)."""
        stdout = six.StringIO()
        call_command("squashmigrations", "migrations", "0002",
                     interactive=False, verbosity=1, no_optimize=True, stdout=stdout)
        self.assertIn("Skipping optimization", force_text(stdout.getvalue()))
| apache-2.0 |
HugoDelval/ip-reputation-monitoring | reputation/db/postgres.py | 1 | 4686 | #
# Copyright (C) 2016, OVH SAS
#
# This file is part of ip-reputation-monitoring.
#
# ip-reputation-monitoring is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Everything you need to deal with the databases is here. """
from datetime import datetime
import psycopg2
from psycopg2 import extras
from config import secrets
class Postgres(object):
    """
    Data access object for the spamhaus PostgreSQL database.

    Use it as a context manager so the connection is always released::

        with Postgres() as dao:
            dao.find_spamhaus_entries()
    """
    def __enter__(self):
        self._open()
        return self
    def __exit__(self, type_exc, value, traceback):
        self._close()
        # Returning False propagates any exception raised in the with-block.
        return False
    def _open(self):
        """
        Open connection to PostgreSQL
        """
        ssl = 'require' if secrets.SPAMHAUS_DB['secured'] else None
        self._connection = psycopg2.connect(database=secrets.SPAMHAUS_DB['db'],
                                            user=secrets.SPAMHAUS_DB['user'],
                                            password=secrets.SPAMHAUS_DB['password'],
                                            host=secrets.SPAMHAUS_DB['host'],
                                            port=secrets.SPAMHAUS_DB['port'],
                                            sslmode=ssl)
        # DictCursor lets callers address result columns by name.
        self._cursor = self._connection.cursor(cursor_factory=extras.DictCursor)
    def _close(self):
        """ Close db's connection. """
        self._connection.close()
    def update_spamhaus_entries(self, documents):
        """
        Update or insert a spamhaus entry into the spamhaus table. For each entry that
        is no longer active, update them to set their attr `active` to false.
        :param list documents: List of dictionaries representing documents to upsert having at least those mandatory keys: [sbl_number, cidr, first_seen, cause]
        """
        now = datetime.now()
        # First upsert still active entries
        for document in documents:
            self._cursor.execute("INSERT INTO spamhaus (sbl_number, cidr, first_seen, cause) "
                                 "VALUES (%(sbl_number)s, %(cidr)s, %(first_seen)s, %(cause)s) "
                                 "ON CONFLICT (sbl_number) DO UPDATE SET "
                                 " last_seen = %(now)s,"
                                 " active = TRUE",
                                 {
                                     "sbl_number": document['sbl_number'],
                                     "cidr": document['cidr'],
                                     "now": now,
                                     "first_seen": document["first_seen"],
                                     "cause": document["cause"]
                                 })
        # Now, set inactive all active documents that are not in documents
        active_ids = [doc['sbl_number'] for doc in documents]
        if active_ids:
            self._cursor.execute("UPDATE spamhaus "
                                 "SET active = FALSE "
                                 "WHERE active = TRUE AND sbl_number NOT IN %(actives)s",
                                 {
                                     "actives": tuple(active_ids)
                                 })
        else:
            # Bug fix: psycopg2 renders an empty tuple as "()" and
            # "NOT IN ()" is invalid SQL. With no documents at all, every
            # currently-active entry has disappeared from the feed.
            self._cursor.execute("UPDATE spamhaus "
                                 "SET active = FALSE "
                                 "WHERE active = TRUE")
        self._connection.commit()
    def find_spamhaus_entries(self, is_active=None):
        """
        Retrieve all registered spamhaus tickets.
        :param bool is_active: (Optional) Filter tickets depending if they're still active or not.
        :rtype: cursor
        :return: All desired spamhaus tickets sorted by first_seen date (asc)
        """
        if is_active is None:
            self._cursor.execute("SELECT * FROM spamhaus "
                                 "ORDER BY first_seen ASC")
            return self._cursor.fetchall()
        self._cursor.execute("SELECT * FROM spamhaus "
                             "WHERE active = %(active)s "
                             "ORDER BY first_seen ASC",
                             {
                                 "active": is_active
                             })
        return self._cursor.fetchall()
| gpl-3.0 |
qqbot-pet-game/qqbot-pet-game | Notify.py | 11 | 1533 | # -*- coding: utf-8 -*-
# Code by Yinzo: https://github.com/Yinzo
# Origin repository: https://github.com/Yinzo/SmartQQBot
class Notify:
    """Base wrapper for a QQ poll notification payload."""
    def __init__(self, json_input):
        # Every notification carries its poll type at the top level.
        self.poll_type = json_input['poll_type']
class InputNotify(Notify):
    """Notification describing an incoming message event."""
    def __init__(self, json_input):
        Notify.__init__(self, json_input)
        value = json_input['value']
        self.from_uin = value['from_uin']
        self.msg_id = value['msg_id']
        self.msg_id2 = value['msg_id2']
        self.msg_type = value['msg_type']
        self.reply_ip = value['reply_ip']
        self.to_uin = value['to_uin']
class BuddiesStatusChange(Notify):
    """Notification that a buddy's online status changed."""
    def __init__(self, json_input):
        Notify.__init__(self, json_input)
        value = json_input['value']
        self.status = value['status']
        self.client_type = value['client_type']
        self.uin = value['uin']
class KickMessage(Notify):
    """Notification that this session was kicked off by the server."""
    def __init__(self, json_input):
        Notify.__init__(self, json_input)
        value = json_input['value']
        self.reply_ip = value['reply_ip']
        self.msg_type = value['msg_type']
        self.msg_id = value['msg_id']
        self.reason = value['reason']
        self.msg_id2 = value['msg_id2']
        self.from_uin = value['from_uin']
        self.show_reason = value['show_reason']
        self.to_uin = value['to_uin']
| gpl-3.0 |
dmnielsen/zSALT | zsalt/mosred.py | 1 | 3102 |
"""
mosred
Process MOS spectral reductions of the data and produce
the output spectra for each object
"""
import os, sys, glob, shutil
import numpy as np
import pyfits
from scipy.ndimage.filters import median_filter
from pyraf import iraf
from iraf import pysalt
from saltobslog import obslog
from specselfid import specselfid
from specslit import specslit
from specidentify import specidentify
from specrectify import specrectify
def mosred(infile_list, slitmask,propcode=None, dy=0, inter=True, guesstype='rss', guessfile='', rstep=100, automethod='Matchlines'):
    """Reduce SALT RSS MOS data: cut out slits, identify arc lines, and
    rectify (wavelength-calibrate) the matching object frames.

    Parameters:
      infile_list -- raw frame paths; the observation date is taken from
                     characters 7-15 of the first file's basename
      slitmask    -- slit-mask definition file passed to specslit
      propcode    -- only frames whose PROPID matches this are processed
      dy          -- vertical slit-position offset (pixels) for specslit
      inter       -- NOTE(review): accepted but unused; every pysalt call
                     below hard-codes inter=False -- confirm intent
      guesstype, guessfile -- initial wavelength-solution guess for specidentify
      rstep       -- row step between wavelength fits in specidentify
      automethod  -- line-matching method used by specidentify

    Side effects: writes 's'/'a'/'x'-prefixed FITS products, the log file
    spec<obsdate>.log and the wavelength database spec<obsdate>.db in the
    current working directory.
    """
    #set up the files
    infiles=','.join(['%s' % x for x in infile_list])  # NOTE(review): built but never used
    obsdate=os.path.basename(infile_list[0])[7:15]
    #set up some files that will be needed
    logfile='spec'+obsdate+'.log'
    dbfile='spec%s.db' % obsdate
    #create the observation log
    obs_dict=obslog(infile_list)
    #apply the mask to the data sets
    for i in range(len(infile_list)):
       specslit(image=infile_list[i], outimage='', outpref='s', exttype='rsmt', slitfile=slitmask,
                outputslitfile='', regprefix='ds_', sections=3, width=25.0, sigma=2.2, thres=6.0, order=1, padding=5, yoffset=dy,
                inter=False, clobber=True, logfile=logfile, verbose=True)
    # Wavelength-identify every arc frame belonging to this proposal.
    for i in range(len(infile_list)):
        if obs_dict['OBJECT'][i].upper().strip()=='ARC' and obs_dict['PROPID'][i].upper().strip()==propcode:
            lamp=obs_dict['LAMPID'][i].strip().replace(' ', '')
            arcimage='s'+os.path.basename(infile_list[i])
            # Fall back to CuAr when the header does not record a lamp.
            if lamp == 'NONE': lamp='CuAr'
            lampfile=iraf.osfn("../../%s.salt" % lamp)
            specselfid(arcimage, '', 'a', arcimage, 'middlerow', 3, clobber=True, logfile=logfile, verbose=True)
            specidentify('a'+arcimage, lampfile, dbfile, guesstype=guesstype,
                  guessfile=guessfile, automethod=automethod,  function='legendre',  order=3,
                  rstep=rstep, rstart='middlerow', mdiff=20, thresh=3, niter=5, smooth=3,
                  inter=False, clobber=True, logfile=logfile, verbose=True)
            #specrectify(arcimage, outimages='', outpref='x', solfile=dbfile, caltype='line',
            #    function='legendre',  order=3, inttype='interp', w1=None, w2=None, dw=None, nw=None,
            #    blank=0.0, clobber=True, logfile=logfile, verbose=True)
    objimages=''     # NOTE(review): never populated or used below
    spec_list=[]     # NOTE(review): never populated or used below
    # Self-identify and rectify every matching object spectrum.
    for i in range(len(infile_list)):
        if obs_dict['CCDTYPE'][i].count('OBJECT') and obs_dict['INSTRUME'][i].count('RSS')  and \
           obs_dict['PROPID'][i].upper().strip()==propcode and \
           obs_dict['OBSMODE'][i].count('SPECTROSCOPY'):
           img = infile_list[i]
           ##rectify it
           # NOTE(review): `arcimage` here is whatever the *last* arc loop
           # iteration produced; if no arc matched, this raises NameError,
           # and with several arcs only the final one is used -- confirm.
           specselfid('s'+img, '', 'a', arcimage, 'middlerow', 3, clobber=True, logfile=logfile, verbose=True)
           specrectify('as'+img, outimages='', outpref='x', solfile=dbfile, caltype='line',
               function='legendre',  order=3, inttype='interp', w1=None, w2=None, dw=None, nw=None,
               blank=0.0, clobber=True, logfile=logfile, verbose=True)
| bsd-3-clause |
StephenWeber/ansible | lib/ansible/modules/network/cumulus/_cl_interface.py | 11 | 15158 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Cumulus Networks <ce-ceng@cumulusnetworks.com>
#
# This file is part of Ansible
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['deprecated'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: cl_interface
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Configures a front panel port, loopback or
management port on Cumulus Linux.
deprecated: Deprecated in 2.3. Use M(nclu) instead.
description:
- Configures a front panel, sub-interface, SVI, management or loopback port
on a Cumulus Linux switch. For bridge ports use the cl_bridge module. For
bond ports use the cl_bond module. When configuring bridge related
features like the "vid" option, please follow the guidelines for
configuring "vlan aware" bridging. For more details review the Layer2
Interface Guide at U(http://docs.cumulusnetworks.com)
options:
name:
description:
- Name of the interface.
required: true
alias_name:
description:
- Description of the port.
ipv4:
description:
- List of IPv4 addresses to configure on the interface.
In the form I(X.X.X.X/YY).
ipv6:
description:
- List of IPv6 addresses to configure on the interface.
In the form I(X:X:X::X/YYY).
addr_method:
description:
- Address method.
choices:
- loopback
- dhcp
speed:
description:
- Set speed of the swp(front panel) or management(eth0) interface.
speed is in MB.
mtu:
description:
- Set MTU. Configure Jumbo Frame by setting MTU to I(9000).
virtual_ip:
description:
- Define IPv4 virtual IP used by the Cumulus Linux VRR feature.
virtual_mac:
description:
- Define Ethernet mac associated with Cumulus Linux VRR feature.
vids:
description:
- In vlan-aware mode, lists VLANs defined under the interface.
mstpctl_bpduguard:
description:
- Enables BPDU Guard on a port in vlan-aware mode.
mstpctl_portnetwork:
description:
- Enables bridge assurance in vlan-aware mode.
mstpctl_portadminedge:
description:
- Enables admin edge port.
clagd_enable:
description:
- Enables the clagd daemon. This command should only be applied to
the clag peerlink interface.
clagd_priority:
description:
- Integer that changes the role the switch has in the clag domain.
The lower priority switch will assume the primary role. The number
can be between 0 and 65535.
clagd_peer_ip:
description:
- IP address of the directly connected peer switch interface.
clagd_sys_mac:
description:
- Clagd system mac address. Recommended to use the range starting
with 44:38:39:ff. Needs to be the same between 2 Clag switches.
pvid:
description:
- In vlan-aware mode, defines vlan that is the untagged vlan.
location:
description:
- Interface directory location
default:
- '/etc/network/interfaces.d'
requirements: [ Alternate Debian network interface manager - \
ifupdown2 @ github.com/CumulusNetworks/ifupdown2 ]
notes:
- As this module writes the interface directory location, ensure that
``/etc/network/interfaces`` has a 'source /etc/network/interfaces.d/\*' or
whatever path is mentioned in the ``location`` attribute.
    - For the config to be activated, i.e. installed in the kernel,
      "service networking reload" needs to be executed. See EXAMPLES section.
'''
EXAMPLES = '''
# Options ['virtual_mac', 'virtual_ip'] are required together
- name: Configure a front panel port with an IP
cl_interface:
name: swp1
ipv4: 10.1.1.1/24
notify: reload networking
- name: Configure front panel to use DHCP
cl_interface:
name: swp2
addr_family: dhcp
notify: reload networking
- name: Configure a SVI for vlan 100 interface with an IP
cl_interface:
name: bridge.100
ipv4: 10.1.1.1/24
notify: reload networking
- name: Configure subinterface with an IP
cl_interface:
name: bond0.100
alias_name: my bond
ipv4: 10.1.1.1/24
notify: reload networking
# define cl_interfaces once in tasks
# then write interfaces in variables file
# with just the options you want.
- name: Create interfaces
cl_interface:
name: '{{ item.key }}'
ipv4: '{{ item.value.ipv4 | default(omit) }}'
ipv6: '{{ item.value.ipv6 | default(omit) }}'
alias_name: '{{ item.value.alias_name | default(omit) }}'
addr_method: '{{ item.value.addr_method | default(omit) }}'
speed: '{{ item.value.link_speed | default(omit) }}'
mtu: '{{ item.value.mtu | default(omit) }}'
clagd_enable: '{{ item.value.clagd_enable | default(omit) }}'
clagd_peer_ip: '{{ item.value.clagd_peer_ip | default(omit) }}'
clagd_sys_mac: '{{ item.value.clagd_sys_mac | default(omit) }}'
clagd_priority: '{{ item.value.clagd_priority | default(omit) }}'
vids: '{{ item.value.vids | default(omit) }}'
virtual_ip: '{{ item.value.virtual_ip | default(omit) }}'
virtual_mac: '{{ item.value.virtual_mac | default(omit) }}'
mstpctl_portnetwork: "{{ item.value.mstpctl_portnetwork | default('no') }}"
mstpctl_portadminedge: "{{ item.value.mstpctl_portadminedge | default('no') }}"
mstpctl_bpduguard: "{{ item.value.mstpctl_bpduguard | default('no') }}"
with_dict: '{{ cl_interfaces }}'
notify: reload networking
# In vars file
# ============
---
cl_interfaces:
swp1:
alias_name: uplink to isp
ipv4: 10.1.1.1/24
swp2:
alias_name: l2 trunk connection
vids:
- 1
- 50
swp3:
speed: 1000
alias_name: connects to 1G link
##########
# br0 interface is configured by cl_bridge
##########
br0.100:
alias_name: SVI for vlan 100
ipv4: 10.2.2.2/24
ipv6: '10:2:2::2/127'
virtual_ip: 10.2.2.254
virtual_mac: 00:00:5E:00:10:10
'''
RETURN = '''
changed:
description: whether the interface was changed
returned: changed
type: bool
sample: True
msg:
description: human-readable report of success or failure
returned: always
type: string
sample: "interface bond0 config updated"
'''
# handy helper for calling system calls.
# calls AnsibleModule.run_command and prints a more appropriate message
# exec_path - path to file to execute, with all its arguments.
# E.g "/sbin/ip -o link show"
# failure_msg - what message to print on failure
def run_cmd(module, exec_path):
    """Run *exec_path* through the module and return its stdout, or fail."""
    rc, stdout, stderr = module.run_command(exec_path)
    if rc <= 0:
        return stdout
    if re.search('cannot find interface', stderr):
        # ifquery on a missing interface: behave as if it returned an
        # empty JSON config so callers can proceed.
        return '[{}]'
    module.fail_json(msg="Failed; %s Error: %s" % (exec_path, stderr))
def current_iface_config(module):
    """Load the interface's current ifupdown2 config into module.custom_current_config."""
    # due to a bug in ifquery, have to check for presence of interface file
    # and not rely solely on ifquery. when bug is fixed, this check can be
    # removed
    iface_name = module.params.get('name')
    ifaces_dir = module.params.get('location')
    module.custom_current_config = {}
    if os.path.exists(ifaces_dir + '/' + iface_name):
        output = run_cmd(module, "/sbin/ifquery -o json %s" % iface_name)
        module.custom_current_config = module.from_json(output)[0]
def build_address(module):
    """Populate the 'address' config entry from the ipv4/ipv6 params.

    DHCP interfaces are skipped entirely: they must not carry a static
    address. IPv4 addresses are listed before IPv6, space-separated.
    """
    if module.params.get('addr_method') == 'dhcp':
        return
    addresses = []
    for key in ('ipv4', 'ipv6'):
        values = module.params.get(key)
        if values:
            addresses.extend(values)
    if addresses:
        module.custom_desired_config['config']['address'] = ' '.join(addresses)
def build_vids(module):
    """Set 'bridge-vids' (space-separated) when any VLAN ids were given."""
    vids = module.params.get('vids')
    if vids:
        module.custom_desired_config['config']['bridge-vids'] = ' '.join(vids)
def build_pvid(module):
    """Set 'bridge-pvid' when a native/untagged VLAN id was provided."""
    pvid = module.params.get('pvid')
    if pvid:
        module.custom_desired_config['config']['bridge-pvid'] = str(pvid)
def build_speed(module):
    """Pin the link speed; a fixed speed also forces full duplex."""
    speed = module.params.get('speed')
    if not speed:
        return
    cfg = module.custom_desired_config['config']
    cfg['link-speed'] = str(speed)
    cfg['link-duplex'] = 'full'
def conv_bool_to_str(_value):
    """Map True/False to ifupdown2's 'yes'/'no'; pass others through."""
    if not isinstance(_value, bool):
        return _value
    return 'yes' if _value else 'no'
def build_generic_attr(module, _attr):
    """Copy one module parameter straight into the desired config.

    Booleans become 'yes'/'no' and underscores in the attribute name
    become dashes to match ifupdown2 keyword spelling. Unset (falsy)
    values are not written.
    """
    value = module.params.get(_attr)
    if isinstance(value, bool):
        value = 'yes' if value else 'no'
    if value:
        module.custom_desired_config['config'][_attr.replace('_', '-')] = \
            str(value)
def build_alias_name(module):
    """Set the interface description ('alias') when one was provided."""
    description = module.params.get('alias_name')
    if description:
        module.custom_desired_config['config']['alias'] = description
def build_addr_method(module):
    """Record the address method (e.g. 'inet dhcp') when one was set."""
    method = module.params.get('addr_method')
    if method:
        desired = module.custom_desired_config
        desired['addr_family'] = 'inet'
        desired['addr_method'] = method
def build_vrr(module):
    """Set 'address-virtual' ("<mac> <ip>") for VRR when virtual_ip is set.

    virtual_mac is required together with virtual_ip by the module's
    argument spec, so it is assumed present here.
    """
    virtual_ip = module.params.get('virtual_ip')
    if virtual_ip:
        virtual_mac = module.params.get('virtual_mac')
        module.custom_desired_config['config']['address-virtual'] = \
            ' '.join([virtual_mac, virtual_ip])
def build_desired_iface_config(module):
    """
    Assemble the ifupdown2-compatible hash for the requested interface
    from the module parameters and store it on
    module.custom_desired_config.
    """
    module.custom_desired_config = {
        'addr_family': None,
        'auto': True,
        'config': {},
        'name': module.params.get('name')
    }
    # Specialized builders first, then the simple pass-through attrs.
    build_addr_method(module)
    build_address(module)
    build_vids(module)
    build_pvid(module)
    build_speed(module)
    build_alias_name(module)
    build_vrr(module)
    generic_attrs = ('mtu', 'mstpctl_portnetwork', 'mstpctl_portadminedge',
                     'mstpctl_bpduguard', 'clagd_enable',
                     'clagd_priority', 'clagd_peer_ip',
                     'clagd_sys_mac', 'clagd_args')
    for attr in generic_attrs:
        build_generic_attr(module, attr)
def config_dict_changed(module):
    """
    Report whether the 'config' sub-hash differs between the desired
    and the currently applied interface configuration.
    """
    return (module.custom_current_config.get('config') !=
            module.custom_desired_config.get('config'))
def config_changed(module):
    """
    Report whether anything differs between the desired and current
    state: either the config hash or the address method.
    """
    desired = module.custom_desired_config
    current = module.custom_current_config
    return (config_dict_changed(module) or
            desired.get('addr_method') != current.get('addr_method'))
def replace_config(module):
    """Render the desired config through ifquery and write it to the
    interface file (by default /etc/network/interfaces.d/<name>).

    The desired hash is serialized to a temp file, rendered by
    ifquery, and only then written to the final location. The target
    file is opened for writing *after* ifquery has succeeded; the
    previous version truncated the file up front, which destroyed the
    existing interface config (and leaked the handle) whenever
    ifquery failed and fail_json aborted the module.
    """
    desired_config = module.custom_desired_config
    final_location = module.params.get('location') + '/' + \
        module.params.get('name')
    temp = tempfile.NamedTemporaryFile()
    try:
        # ifquery expects a JSON *array* of iface stanzas, hence [ ... ].
        temp.write(module.jsonify([desired_config]))
        # Seek flushes the buffered data so ifquery sees the full document.
        temp.seek(0)
        _cmd = "/sbin/ifquery -a -i %s -t json" % (temp.name)
        final_text = run_cmd(module, _cmd)
    finally:
        temp.close()
    # Only now touch the real interface file; 'with' guarantees close.
    with open(final_location, 'w') as config_file:
        config_file.write(final_text)
def main():
    """Entry point: parse parameters, build the desired interface
    config, and rewrite the interfaces file only when it differs from
    the current on-disk config."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True, type='str'),
            ipv4=dict(type='list'),
            ipv6=dict(type='list'),
            alias_name=dict(type='str'),
            addr_method=dict(type='str',
                             choices=['', 'loopback', 'dhcp']),
            speed=dict(type='str'),
            mtu=dict(type='str'),
            virtual_ip=dict(type='str'),
            virtual_mac=dict(type='str'),
            vids=dict(type='list'),
            pvid=dict(type='str'),
            mstpctl_portnetwork=dict(type='bool', choices=BOOLEANS),
            mstpctl_portadminedge=dict(type='bool', choices=BOOLEANS),
            mstpctl_bpduguard=dict(type='bool', choices=BOOLEANS),
            clagd_enable=dict(type='bool', choices=BOOLEANS),
            clagd_priority=dict(type='str'),
            clagd_peer_ip=dict(type='str'),
            clagd_sys_mac=dict(type='str'),
            clagd_args=dict(type='str'),
            location=dict(type='str',
                          default='/etc/network/interfaces.d')
        ),
        required_together=[
            ['virtual_ip', 'virtual_mac'],
            ['clagd_enable', 'clagd_priority',
             'clagd_peer_ip', 'clagd_sys_mac']
        ]
    )
    # if using the jinja default filter, this resolves to
    # create an list with an empty string ['']. The following
    # checks all lists and removes it, so that functions expecting
    # an empty list, get this result. May upstream this fix into
    # the AnsibleModule code to have it check for this.
    for k, _param in module.params.items():
        if isinstance(_param, list):
            module.params[k] = [x for x in _param if x]
    _location = module.params.get('location')
    if not os.path.exists(_location):
        _msg = "%s does not exist." % (_location)
        module.fail_json(msg=_msg)
        # NOTE(review): fail_json normally exits the module; the bare
        # return below only matters when fail_json is stubbed in tests.
        return  # for testing purposes only
    ifacename = module.params.get('name')
    _changed = False
    _msg = "interface %s config not changed" % (ifacename)
    # Compare current on-disk config against the desired config and
    # rewrite the interface file only when something actually changed.
    current_iface_config(module)
    build_desired_iface_config(module)
    if config_changed(module):
        replace_config(module)
        _msg = "interface %s config updated" % (ifacename)
        _changed = True
    module.exit_json(changed=_changed, msg=_msg)
# import module snippets
from ansible.module_utils.basic import *
import tempfile
import os
if __name__ == '__main__':
main()
| gpl-3.0 |
anntzer/scipy | scipy/linalg/matfuncs.py | 21 | 19751 | #
# Author: Travis Oliphant, March 2002
#
__all__ = ['expm','cosm','sinm','tanm','coshm','sinhm',
'tanhm','logm','funm','signm','sqrtm',
'expm_frechet', 'expm_cond', 'fractional_matrix_power',
'khatri_rao']
from numpy import (Inf, dot, diag, prod, logical_not, ravel,
transpose, conjugate, absolute, amax, sign, isfinite, single)
import numpy as np
# Local imports
from .misc import norm
from .basic import solve, inv
from .special_matrices import triu
from .decomp_svd import svd
from .decomp_schur import schur, rsf2csf
from ._expm_frechet import expm_frechet, expm_cond
from ._matfuncs_sqrtm import sqrtm
# Machine epsilons for double and single precision; used as the base of
# the error tolerances throughout this module.
eps = np.finfo(float).eps
feps = np.finfo(single).eps
# Maps a dtype character code to a precision index (0 = single,
# 1 = double) for selecting the right tolerance from {0: ..., 1: ...}.
_array_precision = {'i': 1, 'l': 1, 'f': 0, 'd': 1, 'F': 0, 'D': 1}
###############################################################################
# Utility functions.
def _asarray_square(A):
"""
Wraps asarray with the extra requirement that the input be a square matrix.
The motivation is that the matfuncs module has real functions that have
been lifted to square matrix functions.
Parameters
----------
A : array_like
A square matrix.
Returns
-------
out : ndarray
An ndarray copy or view or other representation of A.
"""
A = np.asarray(A)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected square array_like input')
return A
def _maybe_real(A, B, tol=None):
"""
Return either B or the real part of B, depending on properties of A and B.
The motivation is that B has been computed as a complicated function of A,
and B may be perturbed by negligible imaginary components.
If A is real and B is complex with small imaginary components,
then return a real copy of B. The assumption in that case would be that
the imaginary components of B are numerical artifacts.
Parameters
----------
A : ndarray
Input array whose type is to be checked as real vs. complex.
B : ndarray
Array to be returned, possibly without its imaginary part.
tol : float
Absolute tolerance.
Returns
-------
out : real or complex array
Either the input array B or only the real part of the input array B.
"""
# Note that booleans and integers compare as real.
if np.isrealobj(A) and np.iscomplexobj(B):
if tol is None:
tol = {0:feps*1e3, 1:eps*1e6}[_array_precision[B.dtype.char]]
if np.allclose(B.imag, 0.0, atol=tol):
B = B.real
return B
###############################################################################
# Matrix functions.
def fractional_matrix_power(A, t):
    """
    Compute the fractional power ``A**t`` of a square matrix.

    Proceeds according to the discussion in section (6) of [1]_.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix whose fractional power to evaluate.
    t : float
        Fractional power.

    Returns
    -------
    X : (N, N) array_like
        The fractional power of the matrix.

    References
    ----------
    .. [1] Nicholas J. Higham and Lijing lin (2011)
           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
           SIAM Journal on Matrix Analysis and Applications,
           32 (3). pp. 1056-1078. ISSN 0895-4798

    Examples
    --------
    >>> from scipy.linalg import fractional_matrix_power
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> b = fractional_matrix_power(a, 0.5)
    >>> np.allclose(np.dot(b, b), a)  # verify the square root
    True
    """
    square = _asarray_square(A)
    # Imported locally: the helper calls onenormest from scipy.sparse,
    # and importing it at module scope causes an import problem.
    import scipy.linalg._matfuncs_inv_ssq
    return scipy.linalg._matfuncs_inv_ssq._fractional_matrix_power(square, t)
def logm(A, disp=True):
    """
    Compute the matrix logarithm, the inverse of expm:
    expm(logm(`A`)) == `A`.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix whose logarithm to evaluate
    disp : bool, optional
        Print warning if error in the result is estimated large
        instead of returning estimated error. (Default: True)

    Returns
    -------
    logm : (N, N) ndarray
        Matrix logarithm of `A`
    errest : float
        (if disp == False)
        1-norm of the estimated error, ||err||_1 / ||A||_1

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012)
           "Improved Inverse Scaling and Squaring Algorithms
           for the Matrix Logarithm."
           SIAM Journal on Scientific Computing, 34 (4). C152-C169.
           ISSN 1095-7197
    .. [2] Nicholas J. Higham (2008)
           "Functions of Matrices: Theory and Computation"
           ISBN 978-0-898716-46-7

    Examples
    --------
    >>> from scipy.linalg import logm, expm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> np.allclose(expm(logm(a)), a)
    True
    """
    square = _asarray_square(A)
    # Imported locally to sidestep a circular import with the inverse
    # scaling-and-squaring submodule.
    import scipy.linalg._matfuncs_inv_ssq
    F = _maybe_real(square, scipy.linalg._matfuncs_inv_ssq._logm(square))
    errtol = 1000 * eps
    # TODO use a better error approximation
    errest = norm(expm(F) - square, 1) / norm(square, 1)
    if not disp:
        return F, errest
    if not isfinite(errest) or errest >= errtol:
        print("logm result may be inaccurate, approximate err =", errest)
    return F
def expm(A):
    """
    Compute the matrix exponential of `A` using Pade approximation.

    Parameters
    ----------
    A : (N, N) array_like or sparse matrix
        Matrix to be exponentiated.

    Returns
    -------
    expm : (N, N) ndarray
        Matrix exponential of `A`.

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009)
           "A New Scaling and Squaring Algorithm for the Matrix Exponential."
           SIAM Journal on Matrix Analysis and Applications.
           31 (3). pp. 970-989. ISSN 1095-7162

    Examples
    --------
    Matrix version of the formula exp(0) = 1:

    >>> from scipy.linalg import expm
    >>> expm(np.zeros((2,2)))
    array([[ 1.,  0.],
           [ 0.,  1.]])
    """
    # The sparse implementation handles dense input too, and provides
    # the input checking and conversion; delegate to it.
    import scipy.sparse.linalg
    return scipy.sparse.linalg.expm(A)
def cosm(A):
    """
    Compute the matrix cosine of the square matrix `A`.

    Uses the identity cos(A) = (exp(iA) + exp(-iA)) / 2, evaluated via
    the matrix exponential (expm). For real input only exp(iA) is
    needed, since its real part is cos(A).

    Parameters
    ----------
    A : (N, N) array_like
        Input array

    Returns
    -------
    cosm : (N, N) ndarray
        Matrix cosine of A
    """
    square = _asarray_square(A)
    if not np.iscomplexobj(square):
        return expm(1j * square).real
    return 0.5 * (expm(1j * square) + expm(-1j * square))
def sinm(A):
    """
    Compute the matrix sine of the square matrix `A`.

    Uses the identity sin(A) = (exp(iA) - exp(-iA)) / 2i, evaluated via
    the matrix exponential (expm). For real input only exp(iA) is
    needed, since its imaginary part is sin(A).

    Parameters
    ----------
    A : (N, N) array_like
        Input array.

    Returns
    -------
    sinm : (N, N) ndarray
        Matrix sine of `A`
    """
    square = _asarray_square(A)
    if not np.iscomplexobj(square):
        return expm(1j * square).imag
    return -0.5j * (expm(1j * square) - expm(-1j * square))
def tanm(A):
    """
    Compute the matrix tangent of the square matrix `A`.

    Computed as cos(A)^-1 @ sin(A) via a linear solve, with cosm/sinm
    themselves based on the matrix exponential (expm). Negligible
    imaginary parts are dropped for real input.

    Parameters
    ----------
    A : (N, N) array_like
        Input array.

    Returns
    -------
    tanm : (N, N) ndarray
        Matrix tangent of `A`
    """
    square = _asarray_square(A)
    tangent = solve(cosm(square), sinm(square))
    return _maybe_real(square, tangent)
def coshm(A):
    """
    Compute the hyperbolic matrix cosine of the square matrix `A`.

    Computed as (exp(A) + exp(-A)) / 2 via the matrix exponential
    (expm). Negligible imaginary parts are dropped for real input.

    Parameters
    ----------
    A : (N, N) array_like
        Input array.

    Returns
    -------
    coshm : (N, N) ndarray
        Hyperbolic matrix cosine of `A`
    """
    square = _asarray_square(A)
    cosh = 0.5 * (expm(square) + expm(-square))
    return _maybe_real(square, cosh)
def sinhm(A):
    """
    Compute the hyperbolic matrix sine of the square matrix `A`.

    Computed as (exp(A) - exp(-A)) / 2 via the matrix exponential
    (expm). Negligible imaginary parts are dropped for real input.

    Parameters
    ----------
    A : (N, N) array_like
        Input array.

    Returns
    -------
    sinhm : (N, N) ndarray
        Hyperbolic matrix sine of `A`
    """
    square = _asarray_square(A)
    sinh = 0.5 * (expm(square) - expm(-square))
    return _maybe_real(square, sinh)
def tanhm(A):
    """
    Compute the hyperbolic matrix tangent of the square matrix `A`.

    Computed as cosh(A)^-1 @ sinh(A) via a linear solve, with
    coshm/sinhm themselves based on the matrix exponential (expm).
    Negligible imaginary parts are dropped for real input.

    Parameters
    ----------
    A : (N, N) array_like
        Input array

    Returns
    -------
    tanhm : (N, N) ndarray
        Hyperbolic matrix tangent of `A`
    """
    square = _asarray_square(A)
    tanh = solve(coshm(square), sinhm(square))
    return _maybe_real(square, tanh)
def funm(A, func, disp=True):
    """
    Evaluate a matrix function specified by a callable.
    Returns the value of matrix-valued function ``f`` at `A`. The
    function ``f`` is an extension of the scalar-valued function `func`
    to matrices.
    Parameters
    ----------
    A : (N, N) array_like
        Matrix at which to evaluate the function
    func : callable
        Callable object that evaluates a scalar function f.
        Must be vectorized (eg. using vectorize).
    disp : bool, optional
        Print warning if error in the result is estimated large
        instead of returning estimated error. (Default: True)
    Returns
    -------
    funm : (N, N) ndarray
        Value of the matrix function specified by func evaluated at `A`
    errest : float
        (if disp == False)
        1-norm of the estimated error, ||err||_1 / ||A||_1
    Examples
    --------
    >>> from scipy.linalg import funm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> funm(a, lambda x: x*x)
    array([[  4.,  15.],
           [  5.,  19.]])
    >>> a.dot(a)
    array([[  4.,  15.],
           [  5.,  19.]])
    Notes
    -----
    This function implements the general algorithm based on Schur decomposition
    (Algorithm 9.1.1. in [1]_).
    If the input matrix is known to be diagonalizable, then relying on the
    eigendecomposition is likely to be faster. For example, if your matrix is
    Hermitian, you can do
    >>> from scipy.linalg import eigh
    >>> def funm_herm(a, func, check_finite=False):
    ...     w, v = eigh(a, check_finite=check_finite)
    ...     ## if you further know that your matrix is positive semidefinite,
    ...     ## you can optionally guard against precision errors by doing
    ...     # w = np.maximum(w, 0)
    ...     w = func(w)
    ...     return (v * w).dot(v.conj().T)
    References
    ----------
    .. [1] Gene H. Golub, Charles F. van Loan, Matrix Computations 4th ed.
    """
    A = _asarray_square(A)
    # Perform Shur decomposition (lapack ?gees)
    T, Z = schur(A)
    # Convert the real Schur form to the complex (upper triangular) form.
    T, Z = rsf2csf(T,Z)
    n,n = T.shape
    F = diag(func(diag(T)))  # apply function to diagonal elements
    F = F.astype(T.dtype.char)  # e.g., when F is real but T is complex
    minden = abs(T[0,0])
    # implement Algorithm 11.1.1 from Golub and Van Loan
    # "matrix Computations."
    # Fill the strictly upper triangle of F one superdiagonal at a
    # time (p-th superdiagonal at step p), using entries computed in
    # earlier steps; minden tracks the smallest eigenvalue separation
    # for the error estimate below.
    for p in range(1,n):
        for i in range(1,n-p+1):
            j = i + p
            s = T[i-1,j-1] * (F[j-1,j-1] - F[i-1,i-1])
            ksl = slice(i,j-1)
            val = dot(T[i-1,ksl],F[ksl,j-1]) - dot(F[i-1,ksl],T[ksl,j-1])
            s = s + val
            den = T[j-1,j-1] - T[i-1,i-1]
            # den == 0 means equal (confluent) diagonal entries; the
            # division is skipped in that case.
            if den != 0.0:
                s = s / den
            F[i-1,j-1] = s
            minden = min(minden,abs(den))
    # Transform back from Schur coordinates: Z @ F @ Z^H.
    F = dot(dot(Z, F), transpose(conjugate(Z)))
    F = _maybe_real(A, F)
    tol = {0:feps, 1:eps}[_array_precision[F.dtype.char]]
    if minden == 0.0:
        minden = tol
    # Error heuristic: grows as eigenvalues cluster (small minden).
    err = min(1, max(tol,(tol/minden)*norm(triu(T,1),1)))
    # Any non-finite entry in F makes the estimate meaningless.
    if prod(ravel(logical_not(isfinite(F))),axis=0):
        err = Inf
    if disp:
        if err > 1000*tol:
            print("funm result may be inaccurate, approximate err =", err)
        return F
    else:
        return F, err
def signm(A, disp=True):
    """
    Matrix sign function.
    Extension of the scalar sign(x) to matrices.
    Parameters
    ----------
    A : (N, N) array_like
        Matrix at which to evaluate the sign function
    disp : bool, optional
        Print warning if error in the result is estimated large
        instead of returning estimated error. (Default: True)
    Returns
    -------
    signm : (N, N) ndarray
        Value of the sign function at `A`
    errest : float
        (if disp == False)
        1-norm of the estimated error, ||err||_1 / ||A||_1
    Examples
    --------
    >>> from scipy.linalg import signm, eigvals
    >>> a = [[1,2,3], [1,2,1], [1,1,1]]
    >>> eigvals(a)
    array([ 4.12488542+0.j, -0.76155718+0.j, 0.63667176+0.j])
    >>> eigvals(signm(a))
    array([-1.+0.j, 1.+0.j, 1.+0.j])
    """
    A = _asarray_square(A)
    def rounded_sign(x):
        # Zero out real parts below a precision-dependent threshold
        # before taking the sign, so numerical noise near zero does
        # not flip the result.
        rx = np.real(x)
        if rx.dtype.char == 'f':
            c = 1e3*feps*amax(x)
        else:
            c = 1e3*eps*amax(x)
        return sign((absolute(rx) > c) * rx)
    # First attempt: evaluate the sign through the general funm path.
    result, errest = funm(A, rounded_sign, disp=0)
    errtol = {0:1e3*feps, 1:1e3*eps}[_array_precision[result.dtype.char]]
    if errest < errtol:
        return result
    # Handle signm of defective matrices:
    # See "E.D.Denman and J.Leyva-Ramos, Appl.Math.Comp.,
    # 8:237-250,1981" for how to improve the following (currently a
    # rather naive) iteration process:
    # a = result # sometimes iteration converges faster but where??
    # Shifting to avoid zero eigenvalues. How to ensure that shifting does
    # not change the spectrum too much?
    vals = svd(A, compute_uv=0)
    max_sv = np.amax(vals)
    # min_nonzero_sv = vals[(vals>max_sv*errtol).tolist().count(1)-1]
    # c = 0.5/min_nonzero_sv
    c = 0.5/max_sv
    S0 = A + c*np.identity(A.shape[0])
    prev_errest = errest
    # Newton iteration S <- (S + S^-1)/2; stop when the projector
    # residual ||P^2 - P||_1 (with P = (S^2 + S)/2) is small enough
    # or stops improving.
    for i in range(100):
        iS0 = inv(S0)
        S0 = 0.5*(S0 + iS0)
        Pp = 0.5*(dot(S0,S0)+S0)
        errest = norm(dot(Pp,Pp)-Pp,1)
        if errest < errtol or prev_errest == errest:
            break
        prev_errest = errest
    if disp:
        if not isfinite(errest) or errest >= errtol:
            print("signm result may be inaccurate, approximate err =", errest)
        return S0
    else:
        return S0, errest
def khatri_rao(a, b):
    r"""
    Khatri-rao product: a column-wise Kronecker product of two matrices.

    Parameters
    ----------
    a : (n, k) array_like
        Input array
    b : (m, k) array_like
        Input array

    Returns
    -------
    c : (n*m, k) ndarray
        Khatri-rao product of `a` and `b`.

    Notes
    -----
    Mathematically,

    .. math::

        (A_{ij}  \bigotimes B_{ij})_{ij}

    i.e. the Kronecker product of every column of A and B::

        c = np.vstack([np.kron(a[:, k], b[:, k]) for k in range(b.shape[1])]).T

    See Also
    --------
    kron : Kronecker product
    """
    a = np.asarray(a)
    b = np.asarray(b)
    if a.ndim != 2 or b.ndim != 2:
        raise ValueError("The both arrays should be 2-dimensional.")
    if a.shape[1] != b.shape[1]:
        raise ValueError("The number of columns for both arrays "
                         "should be equal.")
    # Broadcast (n, 1, k) against (1, m, k) to get all column-wise
    # products at once, then collapse the first two axes into rows.
    products = a[..., :, np.newaxis, :] * b[..., np.newaxis, :, :]
    return products.reshape((-1,) + products.shape[2:])
| bsd-3-clause |
renatogames2/namebench | nb_third_party/simplejson/scanner.py | 674 | 2560 | """JSON token scanner
"""
import re
def _import_c_make_scanner():
try:
from simplejson._speedups import make_scanner
return make_scanner
except ImportError:
return None
c_make_scanner = _import_c_make_scanner()
__all__ = ['make_scanner']
# Matches a JSON number: an integer part, then optional fraction and
# optional exponent, each captured as a separate group.
NUMBER_RE = re.compile(
    r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
    (re.VERBOSE | re.MULTILINE | re.DOTALL))
def py_make_scanner(context):
    """Build the pure-Python ``scan_once(string, idx) -> (obj, end_idx)``
    scanner, bound to the decoder settings carried by *context*."""
    # Bind context attributes and the regex matcher to locals once:
    # local-variable lookups are cheaper than attribute lookups, and
    # _scan_once runs recursively over every token of the document.
    parse_object = context.parse_object
    parse_array = context.parse_array
    parse_string = context.parse_string
    match_number = NUMBER_RE.match
    encoding = context.encoding
    strict = context.strict
    parse_float = context.parse_float
    parse_int = context.parse_int
    parse_constant = context.parse_constant
    object_hook = context.object_hook
    object_pairs_hook = context.object_pairs_hook
    memo = context.memo
    def _scan_once(string, idx):
        # Dispatch on the first character; StopIteration signals either
        # end-of-input or an unrecognized token at idx.
        try:
            nextchar = string[idx]
        except IndexError:
            raise StopIteration
        if nextchar == '"':
            return parse_string(string, idx + 1, encoding, strict)
        elif nextchar == '{':
            return parse_object((string, idx + 1), encoding, strict,
                _scan_once, object_hook, object_pairs_hook, memo)
        elif nextchar == '[':
            return parse_array((string, idx + 1), _scan_once)
        elif nextchar == 'n' and string[idx:idx + 4] == 'null':
            return None, idx + 4
        elif nextchar == 't' and string[idx:idx + 4] == 'true':
            return True, idx + 4
        elif nextchar == 'f' and string[idx:idx + 5] == 'false':
            return False, idx + 5
        m = match_number(string, idx)
        if m is not None:
            integer, frac, exp = m.groups()
            # A fraction or exponent makes it a float; otherwise int.
            if frac or exp:
                res = parse_float(integer + (frac or '') + (exp or ''))
            else:
                res = parse_int(integer)
            return res, m.end()
        elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
            return parse_constant('NaN'), idx + 3
        elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
            return parse_constant('Infinity'), idx + 8
        elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
            return parse_constant('-Infinity'), idx + 9
        else:
            raise StopIteration
    def scan_once(string, idx):
        # Clear the per-document key memo after every top-level scan so
        # it does not grow across documents.
        try:
            return _scan_once(string, idx)
        finally:
            memo.clear()
    return scan_once
# Prefer the C-accelerated scanner when the extension is available.
make_scanner = c_make_scanner or py_make_scanner
| apache-2.0 |
pocmo/focus-android | tools/l10n/locales.py | 2 | 1923 | #!/usr/bin/python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""This file contains various locale lists consumed by other tools"""
# Sorted list of locales that ship in release builds of Focus/Klar.
#
# Other builds might include more locales.
#
# Note that there are differences in the locale codes used by Pontoon
# and by Android (e.g. Hebrew: he (Pontoon) vs. iw (Android)).
# This list uses the Android notation. A valid entry can be a
# language code (de) or a language plus country (de-DE). Do not use
# the name of the Androidresource folder (de-rDE) here.
#
# Releases should contain all locales that are at 100% or have been
# shipping in previous releases. Ping :delphine in case you want
# to add or remove locales from releases.
RELEASE_LOCALES = [
    "ar",
    "ast",
    "az",
    "bg",
    "bn-BD",
    "bn-IN",
    "bs",
    "ca",
    "cak",
    "cs",
    "cy",
    "da",
    "de",
    "dsb",
    "el",
    "eo",
    "es-AR",
    "es-CL",
    "es-ES",
    "es-MX",
    "eu",
    "fa",
    "fi",
    "fr",
    "fy-NL",
    "ga-IE",
    "hi-IN",
    "hsb",
    "hu",
    "hy-AM",
    "ia",
    "in",
    "it",
    "iw",
    "ja",
    "ka",
    "kab",
    "kk",
    "ko",
    "lo",
    "meh",
    "mix",
    "ms",
    "my",
    "nb-NO",
    "ne-NP",
    "nl",
    "nn-NO",
    "pl",
    "pt-BR",
    "ro",
    "ru",
    "sk",
    "sl",
    "sq",
    "sr",
    "sv-SE",
    "ta",
    "te",
    "th",
    "tr",
    "trs",
    "uk",
    "ur",
    "vi",
    # "zam" moved into sorted position: the list is documented as
    # sorted, but it previously trailed after "zh-TW".
    "zam",
    "zh-CN",
    "zh-TW"
]
# This is the list of locales that we want to take automated screenshots of
# in addition to the list of release locales. We want to take screenshots
# of other locales so that translators of not yet completed locales can
# verify their work in progress.
ADDITIONAL_SCREENSHOT_LOCALES = [
    "lt"
]
# Those are locales that we take automated screenshots of.
SCREENSHOT_LOCALES = sorted(RELEASE_LOCALES + ADDITIONAL_SCREENSHOT_LOCALES)
| mpl-2.0 |
tensorflow/probability | tensorflow_probability/python/distributions/poisson_test.py | 1 | 22478 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from scipy import stats
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.distributions import poisson as poisson_lib
from tensorflow_probability.python.distributions.internal import statistical_testing as st
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import implementation_selection
from tensorflow_probability.python.internal import test_util
tfd = tfp.distributions
@test_util.test_all_tf_execution_regimes
class PoissonTest(test_util.TestCase):
  def _make_poisson(self,
                    rate,
                    validate_args=True,
                    force_probs_to_zero_outside_support=False):
    """Construct the Poisson distribution under test.

    Validation defaults to on so tests exercise the argument checks;
    individual tests opt out where they feed out-of-support values.
    """
    return tfd.Poisson(
        rate=rate,
        validate_args=validate_args,
        force_probs_to_zero_outside_support=force_probs_to_zero_outside_support)
def testPoissonShape(self):
lam = tf.constant([3.0] * 5)
poisson = self._make_poisson(rate=lam)
self.assertEqual(self.evaluate(poisson.batch_shape_tensor()), (5,))
self.assertEqual(poisson.batch_shape, tf.TensorShape([5]))
self.assertAllEqual(self.evaluate(poisson.event_shape_tensor()), [])
self.assertEqual(poisson.event_shape, tf.TensorShape([]))
def testInvalidLam(self):
invalid_lams = [-.01, -1., -2.]
for lam in invalid_lams:
with self.assertRaisesOpError('Argument `rate` must be non-negative.'):
poisson = self._make_poisson(rate=lam)
self.evaluate(poisson.rate_parameter())
def testZeroLam(self):
lam = 0.
poisson = tfd.Poisson(rate=lam, validate_args=True)
self.assertAllClose(lam, self.evaluate(poisson.rate))
self.assertAllClose(0., poisson.prob(3))
self.assertAllClose(1., poisson.prob(0))
self.assertAllClose(0., poisson.log_prob(0))
def testPoissonLogPmfDiscreteMatchesScipy(self):
batch_size = 12
lam = tf.constant([3.0] * batch_size)
lam_v = 3.0
x = np.array([-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.],
dtype=np.float32)
poisson = self._make_poisson(
rate=lam, force_probs_to_zero_outside_support=True, validate_args=False)
log_pmf = poisson.log_prob(x)
self.assertEqual(log_pmf.shape, (batch_size,))
self.assertAllClose(self.evaluate(log_pmf), stats.poisson.logpmf(x, lam_v))
pmf = poisson.prob(x)
self.assertEqual(pmf.shape, (batch_size,))
self.assertAllClose(self.evaluate(pmf), stats.poisson.pmf(x, lam_v))
  def testPoissonLogPmfContinuousRelaxation(self):
    """With force_probs_to_zero_outside_support=False, log_prob follows
    the continuous relaxation x*log(rate) - lgamma(1+x) - rate for
    x >= 0, and is -inf below zero."""
    batch_size = 12
    lam = tf.constant([3.0] * batch_size)
    x = tf.constant([-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.])
    poisson = self._make_poisson(
        rate=lam,
        force_probs_to_zero_outside_support=False,
        validate_args=False)
    expected_continuous_log_pmf = (
        x * poisson.log_rate_parameter()
        - tf.math.lgamma(1. + x) - poisson.rate_parameter())
    # Cast -inf to the tensor's numpy dtype so tf.where sees matching
    # dtypes on both branches.
    expected_continuous_log_pmf = tf.where(
        x >= 0., expected_continuous_log_pmf,
        dtype_util.as_numpy_dtype(
            expected_continuous_log_pmf.dtype)(-np.inf))
    expected_continuous_pmf = tf.exp(expected_continuous_log_pmf)
    log_pmf = poisson.log_prob(x)
    self.assertEqual((batch_size,), log_pmf.shape)
    self.assertAllClose(self.evaluate(log_pmf),
                        self.evaluate(expected_continuous_log_pmf))
    pmf = poisson.prob(x)
    self.assertEqual((batch_size,), pmf.shape)
    self.assertAllClose(self.evaluate(pmf),
                        self.evaluate(expected_continuous_pmf))
@test_util.numpy_disable_gradient_test
def testPoissonLogPmfGradient(self):
batch_size = 6
lam = tf.constant([3.0] * batch_size)
lam_v = 3.0
# Only non-negative values, as negative ones cause nans in the expected
# value.
x = np.array([0., 2., 3., 4., 5., 6.], dtype=np.float32)
_, dlog_pmf_dlam = self.evaluate(tfp.math.value_and_gradient(
lambda lam: self._make_poisson(rate=lam).log_prob(x), lam))
# A finite difference approximation of the derivative.
eps = 1e-6
expected = (stats.poisson.logpmf(x, lam_v + eps)
- stats.poisson.logpmf(x, lam_v - eps)) / (2 * eps)
self.assertEqual(dlog_pmf_dlam.shape, (batch_size,))
self.assertAllClose(dlog_pmf_dlam, expected)
@test_util.numpy_disable_gradient_test
def testPoissonLogPmfGradientAtZeroPmf(self):
# Check that the derivative wrt parameter at the zero-prob points is zero.
batch_size = 6
lam = tf.constant([3.0] * batch_size)
x = tf.constant([-2., -1., -0.5, 0.2, 1.5, 10.5])
def poisson_log_prob(lam):
return self._make_poisson(
rate=lam,
force_probs_to_zero_outside_support=True,
validate_args=False).log_prob(x)
_, dlog_pmf_dlam = self.evaluate(tfp.math.value_and_gradient(
poisson_log_prob, lam))
self.assertEqual(dlog_pmf_dlam.shape, (batch_size,))
self.assertAllClose(dlog_pmf_dlam, np.zeros([batch_size]))
def testPoissonLogPmfMultidimensional(self):
batch_size = 6
lam = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
lam_v = np.array([2.0, 4.0, 5.0], dtype=np.float32)
x = np.array([[2., 3., 4., 5., 6., 7.]], dtype=np.float32).T
poisson = self._make_poisson(rate=lam)
log_pmf = poisson.log_prob(x)
self.assertEqual(log_pmf.shape, (6, 3))
self.assertAllClose(self.evaluate(log_pmf), stats.poisson.logpmf(x, lam_v))
pmf = poisson.prob(x)
self.assertEqual(pmf.shape, (6, 3))
self.assertAllClose(self.evaluate(pmf), stats.poisson.pmf(x, lam_v))
  @test_util.jax_disable_test_missing_functionality(
      '`tf.math.igammac` is unimplemented in JAX backend.')
  def testPoissonCdf(self):
    """cdf/log_cdf match scipy, including points outside the support."""
    batch_size = 12
    lam = tf.constant([3.0] * batch_size)
    lam_v = 3.0
    # Mix of negative, non-integer, and integer points exercises the
    # force_probs_to_zero_outside_support handling.
    x = np.array([-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.],
                 dtype=np.float32)
    poisson = self._make_poisson(
        rate=lam, force_probs_to_zero_outside_support=True, validate_args=False)
    log_cdf = poisson.log_cdf(x)
    self.assertEqual(log_cdf.shape, (batch_size,))
    self.assertAllClose(self.evaluate(log_cdf), stats.poisson.logcdf(x, lam_v))
    cdf = poisson.cdf(x)
    self.assertEqual(cdf.shape, (batch_size,))
    self.assertAllClose(self.evaluate(cdf), stats.poisson.cdf(x, lam_v))
  def testPoissonSurvivalFunction(self):
    """survival_function/log_survival_function match scipy's sf/logsf."""
    batch_size = 12
    lam = tf.constant([3.0] * batch_size)
    lam_v = 3.0
    x = np.array([-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.],
                 dtype=np.float32)
    poisson = self._make_poisson(
        rate=lam, force_probs_to_zero_outside_support=True, validate_args=False)
    log_survival = poisson.log_survival_function(x)
    self.assertEqual(log_survival.shape, (batch_size,))
    self.assertAllClose(
        self.evaluate(log_survival), stats.poisson.logsf(x, lam_v))
    survival = poisson.survival_function(x)
    self.assertEqual(survival.shape, (batch_size,))
    self.assertAllClose(self.evaluate(survival), stats.poisson.sf(x, lam_v))
    # Regression check: log survival in the far tail of a tiny rate must stay
    # finite rather than underflow to -inf/nan.
    small_probs = tfd.Poisson(rate=0.123).log_survival_function(
        np.linspace(10, 19, 10))
    self.assertAllFinite(self.evaluate(small_probs))
  @test_util.jax_disable_test_missing_functionality(
      '`tf.math.igammac` is unimplemented in JAX backend.')
  def testPoissonCdfContinuousRelaxation(self):
    """Without forcing zeros, cdf equals the continuous igammac relaxation."""
    batch_size = 12
    lam = tf.constant([3.0] * batch_size)
    x = np.array([-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.],
                 dtype=np.float32)
    # The continuous relaxation is Q(1 + x, lam) (regularized upper incomplete
    # gamma), clamped to 0 for x < 0.
    expected_continuous_cdf = tf.math.igammac(1. + x, lam)
    expected_continuous_cdf = tf.where(x >= 0., expected_continuous_cdf,
                                       tf.zeros_like(expected_continuous_cdf))
    expected_continuous_log_cdf = tf.math.log(expected_continuous_cdf)
    poisson = self._make_poisson(
        rate=lam,
        force_probs_to_zero_outside_support=False, validate_args=False)
    log_cdf = poisson.log_cdf(x)
    self.assertEqual(log_cdf.shape, (batch_size,))
    self.assertAllClose(self.evaluate(log_cdf),
                        self.evaluate(expected_continuous_log_cdf))
    cdf = poisson.cdf(x)
    self.assertEqual(cdf.shape, (batch_size,))
    self.assertAllClose(self.evaluate(cdf),
                        self.evaluate(expected_continuous_cdf))
  @test_util.jax_disable_test_missing_functionality(
      '`tf.math.igammac` is unimplemented in JAX backend.')
  @test_util.numpy_disable_gradient_test
  def testPoissonCdfGradient(self):
    """Compares d(cdf)/d(rate) against a central finite difference."""
    batch_size = 12
    lam = tf.constant([3.0] * batch_size)
    lam_v = 3.0
    x = np.array([-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.],
                 dtype=np.float32)
    def cdf(lam):
      return self._make_poisson(
          rate=lam,
          force_probs_to_zero_outside_support=True,
          validate_args=False).cdf(x)
    _, dcdf_dlam = self.evaluate(tfp.math.value_and_gradient(cdf, lam))
    # A finite difference approximation of the derivative.
    eps = 1e-6
    expected = (stats.poisson.cdf(x, lam_v + eps)
                - stats.poisson.cdf(x, lam_v - eps)) / (2 * eps)
    self.assertEqual(dcdf_dlam.shape, (batch_size,))
    self.assertAllClose(dcdf_dlam, expected)
  @test_util.jax_disable_test_missing_functionality(
      '`tf.math.igammac` is unimplemented in JAX backend.')
  def testPoissonCdfMultidimensional(self):
    """cdf/log_cdf broadcast a (6, 1) x against a (6, 3) rate batch."""
    batch_size = 6
    lam = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
    lam_v = np.array([2.0, 4.0, 5.0], dtype=np.float32)
    x = np.array([[2., 3., 4., 5., 6., 7.]], dtype=np.float32).T
    poisson = self._make_poisson(
        rate=lam, force_probs_to_zero_outside_support=True)
    log_cdf = poisson.log_cdf(x)
    self.assertEqual(log_cdf.shape, (6, 3))
    self.assertAllClose(self.evaluate(log_cdf), stats.poisson.logcdf(x, lam_v))
    cdf = poisson.cdf(x)
    self.assertEqual(cdf.shape, (6, 3))
    self.assertAllClose(self.evaluate(cdf), stats.poisson.cdf(x, lam_v))
def testPoissonMean(self):
lam_v = np.array([1.0, 3.0, 2.5], dtype=np.float32)
poisson = self._make_poisson(rate=lam_v)
self.assertEqual(poisson.mean().shape, (3,))
self.assertAllClose(
self.evaluate(poisson.mean()), stats.poisson.mean(lam_v))
self.assertAllClose(self.evaluate(poisson.mean()), lam_v)
def testPoissonVariance(self):
lam_v = np.array([1.0, 3.0, 2.5], dtype=np.float32)
poisson = self._make_poisson(rate=lam_v)
self.assertEqual(poisson.variance().shape, (3,))
self.assertAllClose(
self.evaluate(poisson.variance()), stats.poisson.var(lam_v))
self.assertAllClose(self.evaluate(poisson.variance()), lam_v)
def testPoissonStd(self):
lam_v = np.array([1.0, 3.0, 2.5], dtype=np.float32)
poisson = self._make_poisson(rate=lam_v)
self.assertEqual(poisson.stddev().shape, (3,))
self.assertAllClose(
self.evaluate(poisson.stddev()), stats.poisson.std(lam_v))
self.assertAllClose(self.evaluate(poisson.stddev()), np.sqrt(lam_v))
def testPoissonMode(self):
lam_v = np.array([1.0, 3.0, 2.5, 3.2, 1.1, 0.05], dtype=np.float32)
poisson = self._make_poisson(rate=lam_v)
self.assertEqual(poisson.mode().shape, (6,))
self.assertAllClose(self.evaluate(poisson.mode()), np.floor(lam_v))
def testPoissonMultipleMode(self):
lam_v = np.array([1.0, 3.0, 2.0, 4.0, 5.0, 10.0], dtype=np.float32)
poisson = self._make_poisson(rate=lam_v)
# For the case where lam is an integer, the modes are: lam and lam - 1.
# In this case, we get back the larger of the two modes.
self.assertEqual((6,), poisson.mode().shape)
self.assertAllClose(lam_v, self.evaluate(poisson.mode()))
  def testPoissonSample(self):
    """Sample mean and variance agree with scipy within statistical rtol."""
    lam_v = 4.0
    lam = tf.constant(lam_v)
    # Choosing `n >= (k/rtol)**2, roughly ensures our sample mean should be
    # within `k` std. deviations of actual up to rtol precision.
    n = int(100e3)
    poisson = self._make_poisson(rate=lam)
    samples = poisson.sample(n, seed=test_util.test_seed())
    sample_values = self.evaluate(samples)
    self.assertEqual(samples.shape, (n,))
    self.assertEqual(sample_values.shape, (n,))
    self.assertAllClose(
        sample_values.mean(), stats.poisson.mean(lam_v), rtol=.01)
    self.assertAllClose(sample_values.var(), stats.poisson.var(lam_v),
                        rtol=.013)
def testAssertValidSample(self):
lam_v = np.array([1.0, 3.0, 2.5], dtype=np.float32)
poisson = self._make_poisson(rate=lam_v)
with self.assertRaisesOpError('Condition x >= 0'):
self.evaluate(poisson.cdf([-1.2, 3., 4.2]))
  def testPoissonSampleMultidimensionalMean(self):
    """Per-component sample means match scipy across a 1 x 50 rate batch."""
    lam_v = np.array([np.arange(1, 51, dtype=np.float32)])  # 1 x 50
    poisson = self._make_poisson(rate=lam_v)
    # Choosing `n >= (k/rtol)**2, roughly ensures our sample mean should be
    # within `k` std. deviations of actual up to rtol precision.
    n = int(100e3)
    samples = poisson.sample(n, seed=test_util.test_seed())
    sample_values = self.evaluate(samples)
    self.assertEqual(samples.shape, (n, 1, 50))
    self.assertEqual(sample_values.shape, (n, 1, 50))
    self.assertAllClose(
        sample_values.mean(axis=0), stats.poisson.mean(lam_v), rtol=.01, atol=0)
  def testPoissonSampleMultidimensionalVariance(self):
    """Per-component sample variances match scipy across a 1 x 10 batch."""
    lam_v = np.array([np.arange(5, 15, dtype=np.float32)])  # 1 x 10
    poisson = self._make_poisson(rate=lam_v)
    # Choosing `n >= 2 * lam * (k/rtol)**2, roughly ensures our sample
    # variance should be within `k` std. deviations of actual up to rtol
    # precision.
    n = int(300e3)
    samples = poisson.sample(n, seed=test_util.test_seed())
    sample_values = self.evaluate(samples)
    self.assertEqual(samples.shape, (n, 1, 10))
    self.assertEqual(sample_values.shape, (n, 1, 10))
    self.assertAllClose(
        sample_values.var(axis=0), stats.poisson.var(lam_v), rtol=.03, atol=0)
  @test_util.tf_tape_safety_test
  def testGradientThroughRate(self):
    """Gradients flow from log_prob back to the rate variable."""
    rate = tf.Variable(3.)
    dist = self._make_poisson(rate=rate)
    with tf.GradientTape() as tape:
      loss = -dist.log_prob([1., 2., 4.])
    grad = tape.gradient(loss, dist.trainable_variables)
    self.assertLen(grad, 1)
    self.assertAllNotNone(grad)
  def testAssertsNonNegativeRate(self):
    """With validate_args=True a negative rate component is rejected."""
    rate = tf.Variable([1., 2., -3.])
    self.evaluate(rate.initializer)
    with self.assertRaisesOpError('Argument `rate` must be non-negative.'):
      dist = self._make_poisson(rate=rate, validate_args=True)
      self.evaluate(dist.sample(seed=test_util.test_seed()))
  def testAssertsNonNegativeRateAfterMutation(self):
    """Validation also fires when the rate variable later turns negative."""
    rate = tf.Variable([1., 2., 3.])
    self.evaluate(rate.initializer)
    dist = self._make_poisson(rate=rate, validate_args=True)
    self.evaluate(dist.mean())
    with self.assertRaisesOpError('Argument `rate` must be non-negative.'):
      with tf.control_dependencies([rate.assign([1., 2., -3.])]):
        self.evaluate(dist.sample(seed=test_util.test_seed()))
@test_util.test_all_tf_execution_regimes
class PoissonLogRateTest(PoissonTest):
  """Re-runs all PoissonTest cases with the `log_rate` parameterization."""

  def _make_poisson(self,
                    rate,
                    validate_args=True,
                    force_probs_to_zero_outside_support=False):
    # Build from log_rate so every inherited test exercises that code path.
    return tfd.Poisson(
        log_rate=tf.math.log(rate),
        validate_args=validate_args,
        force_probs_to_zero_outside_support=force_probs_to_zero_outside_support)

  # No need to worry about the non-negativity of `rate` when using the
  # `log_rate` parameterization, so the corresponding inherited tests are
  # disabled with empty overrides.
  def testInvalidLam(self):
    pass

  def testAssertsNonNegativeRate(self):
    pass

  def testAssertsNonNegativeRateAfterMutation(self):
    pass

  # The gradient is not tracked through tf.math.log(rate) in _make_poisson(),
  # so log_rate needs to be defined as a Variable and passed directly.
  @test_util.tf_tape_safety_test
  def testGradientThroughRate(self):
    log_rate = tf.Variable(3.)
    dist = tfd.Poisson(log_rate=log_rate, validate_args=True)
    with tf.GradientTape() as tape:
      loss = -dist.log_prob([1., 2., 4.])
    grad = tape.gradient(loss, dist.trainable_variables)
    self.assertLen(grad, 1)
    self.assertAllNotNone(grad)
@test_util.test_graph_and_eager_modes
class PoissonSamplingTest(test_util.TestCase):
  """Tests for the low-level Poisson samplers in `poisson_lib`."""

  @test_util.jax_disable_test_missing_functionality('tf stateless_poisson')
  def testSampleCPU(self):
    """On CPU the sampler dispatches to the CPU-specific runtime."""
    with tf.device('CPU'):
      _, runtime = self.evaluate(
          poisson_lib.random_poisson(
              shape=tf.constant([], dtype=tf.int32),
              rates=tf.constant(10.),
              seed=test_util.test_seed()))
    self.assertEqual(implementation_selection._RUNTIME_CPU, runtime)

  def testSampleGPU(self):
    """On GPU the sampler selects the default (non-CPU) implementation."""
    if not tf.test.is_gpu_available():
      self.skipTest('no GPU')
    with tf.device('GPU'):
      _, runtime = self.evaluate(poisson_lib.random_poisson(
          shape=tf.constant([], dtype=tf.int32),
          rates=tf.constant(10.),
          seed=test_util.test_seed()))
    self.assertEqual(implementation_selection._RUNTIME_DEFAULT, runtime)

  def testSampleXLA(self):
    """Sampling compiles under XLA and uses the XLA-friendly variant."""
    self.skip_if_no_xla()
    if not tf.executing_eagerly(): return  # jit_compile is eager-only.
    log_rates = np.random.rand(4, 3).astype(np.float32)
    dist = tfd.Poisson(log_rate=log_rates, validate_args=True)
    # Verify the compile succeeds going all the way through the distribution.
    self.evaluate(
        tf.function(lambda: dist.sample(5, seed=test_util.test_seed()),
                    jit_compile=True)())
    # Also test the low-level sampler and verify the XLA-friendly variant.
    _, runtime = self.evaluate(
        tf.function(poisson_lib.random_poisson, jit_compile=True)(
            shape=tf.constant([], dtype=tf.int32),
            rates=tf.constant(10.),
            seed=test_util.test_seed()))
    self.assertEqual(implementation_selection._RUNTIME_DEFAULT, runtime)

  def testSamplePoissonLowRates(self):
    # Low log rate (< log(10.)) samples would use Knuth's algorithm.
    rate = [1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5]
    log_rate = np.log(rate)
    num_samples = int(1e5)
    # Sanity check: num_samples is large enough for the DKWM CDF test below.
    self.assertLess(
        self.evaluate(
            st.min_num_samples_for_dkwm_cdf_test(
                discrepancy=0.04, false_fail_rate=1e-9, false_pass_rate=1e-9)),
        num_samples)
    samples = poisson_lib._random_poisson_noncpu(
        shape=[num_samples],
        log_rates=log_rate,
        output_dtype=tf.float64,
        seed=test_util.test_seed())
    poisson = tfd.Poisson(log_rate=log_rate, validate_args=True)
    self.evaluate(
        st.assert_true_cdf_equal_by_dkwm(
            samples,
            poisson.cdf,
            st.left_continuous_cdf_discrete_distribution(poisson),
            false_fail_rate=1e-9))
    self.assertAllClose(
        self.evaluate(tf.math.reduce_mean(samples, axis=0)),
        stats.poisson.mean(rate),
        rtol=0.01)
    self.assertAllClose(
        self.evaluate(tf.math.reduce_variance(samples, axis=0)),
        stats.poisson.var(rate),
        rtol=0.05)

  def testSamplePoissonHighRates(self):
    # High rate (>= log(10.)) samples would use rejection sampling.
    rate = [10., 10.5, 11., 11.5, 12.0, 12.5, 13.0, 13.5, 14.0, 14.5]
    log_rate = np.log(rate)
    num_samples = int(1e5)
    # Sanity check: num_samples is large enough for the DKWM CDF test below.
    self.assertLess(
        self.evaluate(
            st.min_num_samples_for_dkwm_cdf_test(
                discrepancy=0.04, false_fail_rate=1e-9, false_pass_rate=1e-9)),
        num_samples)
    samples = poisson_lib._random_poisson_noncpu(
        shape=[num_samples],
        log_rates=log_rate,
        output_dtype=tf.float64,
        seed=test_util.test_seed())
    poisson = tfd.Poisson(log_rate=log_rate, validate_args=True)
    self.evaluate(
        st.assert_true_cdf_equal_by_dkwm(
            samples,
            poisson.cdf,
            st.left_continuous_cdf_discrete_distribution(poisson),
            false_fail_rate=1e-9))
    self.assertAllClose(
        self.evaluate(tf.math.reduce_mean(samples, axis=0)),
        stats.poisson.mean(rate),
        rtol=0.01)
    self.assertAllClose(
        self.evaluate(tf.math.reduce_variance(samples, axis=0)),
        stats.poisson.var(rate),
        rtol=0.05)

  def testSamplePoissonLowAndHighRates(self):
    """Mixed low and high rates exercise both sampler branches in one call."""
    rate = [1., 3., 5., 6., 7., 10., 13.0, 14., 15., 18.]
    log_rate = np.log(rate)
    num_samples = int(1e5)
    poisson = tfd.Poisson(log_rate=log_rate, validate_args=True)
    # Sanity check: num_samples is large enough for the DKWM CDF test below.
    self.assertLess(
        self.evaluate(
            st.min_num_samples_for_dkwm_cdf_test(
                discrepancy=0.04, false_fail_rate=1e-9, false_pass_rate=1e-9)),
        num_samples)
    samples = poisson_lib._random_poisson_noncpu(
        shape=[num_samples],
        log_rates=log_rate,
        output_dtype=tf.float64,
        seed=test_util.test_seed())
    self.evaluate(
        st.assert_true_cdf_equal_by_dkwm(
            samples,
            poisson.cdf,
            st.left_continuous_cdf_discrete_distribution(poisson),
            false_fail_rate=1e-9))

  def testSamplePoissonInvalidRates(self):
    """Sampling with nan/negative/zero rates still runs to completion."""
    rate = [np.nan, -1., 0., 5., 7., 10., 13.0, 14., 15., 18.]
    log_rate = np.log(rate)
    samples = self.evaluate(
        poisson_lib._random_poisson_noncpu(
            shape=[int(1e5)],
            log_rates=log_rate,
            output_dtype=tf.float64,
            seed=test_util.test_seed()))
    # NOTE(review): scipy's mean/var are nan for the nan and negative rates;
    # this appears to rely on assertAllClose matching nan against nan —
    # confirm that behavior is intended.
    self.assertAllClose(
        self.evaluate(tf.math.reduce_mean(samples, axis=0)),
        stats.poisson.mean(rate),
        rtol=0.01)
    self.assertAllClose(
        self.evaluate(tf.math.reduce_variance(samples, axis=0)),
        stats.poisson.var(rate),
        rtol=0.05)
if __name__ == '__main__':
  # Run all test cases in this module under the TF test runner.
  tf.test.main()
| apache-2.0 |
nanolearningllc/edx-platform-cypress | common/djangoapps/external_auth/tests/test_shib.py | 42 | 30355 | # -*- coding: utf-8 -*-
"""
Tests for Shibboleth Authentication
@jbau
"""
import unittest
from ddt import ddt, data
from django.conf import settings
from django.http import HttpResponseRedirect
from django.test import TestCase
from django.test.client import RequestFactory, Client as DjangoTestClient
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.contrib.auth.models import AnonymousUser, User
from django.utils.importlib import import_module
from edxmako.tests import mako_middleware_process_request
from external_auth.models import ExternalAuthMap
from external_auth.views import (
shib_login, course_specific_login, course_specific_register, _flatten_to_ascii
)
from mock import patch
from urllib import urlencode
from student.views import create_account, change_enrollment
from student.models import UserProfile, CourseEnrollment
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore import ModuleStoreEnum
# Shib is supposed to provide 'REMOTE_USER', 'givenName', 'sn', 'mail', 'Shib-Identity-Provider'
# attributes via request.META. We can count on 'Shib-Identity-Provider', and 'REMOTE_USER' being present
# b/c of how mod_shib works but should test the behavior with the rest of the attributes present/missing.
# For the sake of python convention we'll make all of these variable names ALL_CAPS.
# These values would all be returned from request.META, so they need to be str, not unicode.
IDP = 'https://idp.stanford.edu/'
REMOTE_USER = 'test_user@stanford.edu'
MAILS = [None, '', 'test_user@stanford.edu']  # unicode shouldn't be in emails, would fail django's email validator
DISPLAYNAMES = [None, '', 'Jason 包']  # absent, empty, and non-ascii display names
GIVENNAMES = [None, '', 'jasön; John; bob']  # At Stanford, the givenNames can be a list delimited by ';'
SNS = [None, '', '包; smith']  # At Stanford, the sns can be a list delimited by ';'
def gen_all_identities():
    """
    Yield every combination of shib attribute dicts used as test input.

    Each yielded dict mimics what a shib IDP could pass to django via
    request.META: 'Shib-Identity-Provider' and 'REMOTE_USER' are always
    present, while 'mail', 'displayName', 'givenName' and 'sn' are each
    either omitted entirely (the None case) or set to one of the sample
    values from MAILS/DISPLAYNAMES/GIVENNAMES/SNS.
    """
    for mail in MAILS:
        for given_name in GIVENNAMES:
            for surname in SNS:
                for display_name in DISPLAYNAMES:
                    identity = {'Shib-Identity-Provider': IDP,
                                'REMOTE_USER': REMOTE_USER}
                    if display_name is not None:
                        identity['displayName'] = display_name
                    if mail is not None:
                        identity['mail'] = mail
                    if given_name is not None:
                        identity['givenName'] = given_name
                    if surname is not None:
                        identity['sn'] = surname
                    yield identity
@ddt
@override_settings(SESSION_ENGINE='django.contrib.sessions.backends.cache')
class ShibSPTest(SharedModuleStoreTestCase):
"""
Tests for the Shibboleth SP, which communicates via request.META
(Apache environment variables set by mod_shib)
"""
request_factory = RequestFactory()
    def setUp(self):
        super(ShibSPTest, self).setUp()
        # Sentinel user id used when creating/updating courses in the test
        # modulestore (see the course-specific tests in this class).
        self.test_user_id = ModuleStoreEnum.UserID.test
    @unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
    def test_exception_shib_login(self):
        """
        Tests that we get the error page when there is no REMOTE_USER
        or Shib-Identity-Provider in request.META
        """
        # Case 1: Shib-Identity-Provider present, REMOTE_USER missing.
        no_remote_user_request = self.request_factory.get('/shib-login')
        no_remote_user_request.META.update({'Shib-Identity-Provider': IDP})
        no_remote_user_request.user = AnonymousUser()
        mako_middleware_process_request(no_remote_user_request)
        no_remote_user_response = shib_login(no_remote_user_request)
        self.assertEqual(no_remote_user_response.status_code, 403)
        self.assertIn("identity server did not return your ID information", no_remote_user_response.content)
        # Case 2: REMOTE_USER present, Shib-Identity-Provider missing.
        no_idp_request = self.request_factory.get('/shib-login')
        no_idp_request.META.update({'REMOTE_USER': REMOTE_USER})
        no_idp_response = shib_login(no_idp_request)
        self.assertEqual(no_idp_response.status_code, 403)
        self.assertIn("identity server did not return your ID information", no_idp_response.content)
def _assert_shib_login_is_logged(self, audit_log_call, remote_user):
"""Asserts that shibboleth login attempt is being logged"""
remote_user = _flatten_to_ascii(remote_user) # django usernames have to be ascii
method_name, args, _kwargs = audit_log_call
self.assertEquals(method_name, 'info')
self.assertEquals(len(args), 1)
self.assertIn(u'logged in via Shibboleth', args[0])
self.assertIn(remote_user, args[0])
    @unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
    def test_shib_login(self):
        """
        Tests that:
        * shib credentials that match an existing ExternalAuthMap with a linked active user logs the user in
        * shib credentials that match an existing ExternalAuthMap with a linked inactive user shows error page
        * shib credentials that match an existing ExternalAuthMap without a linked user and also match the email
          of an existing user without an existing ExternalAuthMap links the two and log the user in
        * shib credentials that match an existing ExternalAuthMap without a linked user and also match the email
          of an existing user that already has an ExternalAuthMap causes an error (403)
        * shib credentials that do not match an existing ExternalAuthMap causes the registration form to appear
        """
        # Fixture: an active user already linked to the Stanford IDP.
        user_w_map = UserFactory.create(email='withmap@stanford.edu')
        extauth = ExternalAuthMap(external_id='withmap@stanford.edu',
                                  external_email='',
                                  external_domain='shib:https://idp.stanford.edu/',
                                  external_credentials="",
                                  user=user_w_map)
        # Fixture: an active user with no ExternalAuthMap yet.
        user_wo_map = UserFactory.create(email='womap@stanford.edu')
        user_w_map.save()
        user_wo_map.save()
        extauth.save()
        # Fixture: an inactive (unactivated) user linked to the Stanford IDP.
        inactive_user = UserFactory.create(email='inactive@stanford.edu')
        inactive_user.is_active = False
        inactive_extauth = ExternalAuthMap(external_id='inactive@stanford.edu',
                                           external_email='',
                                           external_domain='shib:https://idp.stanford.edu/',
                                           external_credentials="",
                                           user=inactive_user)
        inactive_user.save()
        inactive_extauth.save()
        # The cross product of IDPs and remote users covers each docstring
        # scenario; the if/elif chain below asserts the expected outcome.
        idps = ['https://idp.stanford.edu/', 'https://someother.idp.com/']
        remote_users = ['withmap@stanford.edu', 'womap@stanford.edu',
                        'testuser2@someother_idp.com', 'inactive@stanford.edu']
        for idp in idps:
            for remote_user in remote_users:
                request = self.request_factory.get('/shib-login')
                request.session = import_module(settings.SESSION_ENGINE).SessionStore()  # empty session
                request.META.update({'Shib-Identity-Provider': idp,
                                     'REMOTE_USER': remote_user,
                                     'mail': remote_user})
                request.user = AnonymousUser()
                mako_middleware_process_request(request)
                with patch('external_auth.views.AUDIT_LOG') as mock_audit_log:
                    response = shib_login(request)
                audit_log_calls = mock_audit_log.method_calls
                if idp == "https://idp.stanford.edu/" and remote_user == 'withmap@stanford.edu':
                    # Active linked user: logged in, redirected to dashboard.
                    self.assertIsInstance(response, HttpResponseRedirect)
                    self.assertEqual(request.user, user_w_map)
                    self.assertEqual(response['Location'], '/dashboard')
                    # verify logging:
                    self.assertEquals(len(audit_log_calls), 2)
                    self._assert_shib_login_is_logged(audit_log_calls[0], remote_user)
                    method_name, args, _kwargs = audit_log_calls[1]
                    self.assertEquals(method_name, 'info')
                    self.assertEquals(len(args), 1)
                    self.assertIn(u'Login success', args[0])
                    self.assertIn(remote_user, args[0])
                elif idp == "https://idp.stanford.edu/" and remote_user == 'inactive@stanford.edu':
                    # Inactive linked user: 403 with an activation reminder.
                    self.assertEqual(response.status_code, 403)
                    self.assertIn("Account not yet activated: please look for link in your email", response.content)
                    # verify logging:
                    self.assertEquals(len(audit_log_calls), 2)
                    self._assert_shib_login_is_logged(audit_log_calls[0], remote_user)
                    method_name, args, _kwargs = audit_log_calls[1]
                    self.assertEquals(method_name, 'warning')
                    self.assertEquals(len(args), 1)
                    self.assertIn(u'is not active after external login', args[0])
                    # self.assertEquals(remote_user, args[1])
                elif idp == "https://idp.stanford.edu/" and remote_user == 'womap@stanford.edu':
                    # Matching email with no map: map is created, user logs in.
                    self.assertIsNotNone(ExternalAuthMap.objects.get(user=user_wo_map))
                    self.assertIsInstance(response, HttpResponseRedirect)
                    self.assertEqual(request.user, user_wo_map)
                    self.assertEqual(response['Location'], '/dashboard')
                    # verify logging:
                    self.assertEquals(len(audit_log_calls), 2)
                    self._assert_shib_login_is_logged(audit_log_calls[0], remote_user)
                    method_name, args, _kwargs = audit_log_calls[1]
                    self.assertEquals(method_name, 'info')
                    self.assertEquals(len(args), 1)
                    self.assertIn(u'Login success', args[0])
                    self.assertIn(remote_user, args[0])
                elif idp == "https://someother.idp.com/" and remote_user in \
                        ['withmap@stanford.edu', 'womap@stanford.edu', 'inactive@stanford.edu']:
                    # Same email arriving from a different IDP: conflict, 403.
                    self.assertEqual(response.status_code, 403)
                    self.assertIn("You have already created an account using an external login", response.content)
                    # no audit logging calls
                    self.assertEquals(len(audit_log_calls), 0)
                else:
                    # Unknown credentials: the registration form is rendered.
                    self.assertEqual(response.status_code, 200)
                    self.assertContains(response,
                                        ("Preferences for {platform_name}"
                                         .format(platform_name=settings.PLATFORM_NAME)))
                    # no audit logging calls
                    self.assertEquals(len(audit_log_calls), 0)
    def _base_test_extauth_auto_activate_user_with_flag(self, log_user_string="inactive@stanford.edu"):
        """
        Tests that FEATURES['BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'] means extauth automatically
        linked users, activates them, and logs them in.

        `log_user_string` is the identifier expected to appear in the audit
        log: the email, or "user.id: N" when PII squelching is enabled (see
        the two wrapper tests).
        """
        inactive_user = UserFactory.create(email='inactive@stanford.edu')
        inactive_user.is_active = False
        inactive_user.save()
        request = self.request_factory.get('/shib-login')
        request.session = import_module(settings.SESSION_ENGINE).SessionStore()  # empty session
        request.META.update({
            'Shib-Identity-Provider': 'https://idp.stanford.edu/',
            'REMOTE_USER': 'inactive@stanford.edu',
            'mail': 'inactive@stanford.edu'
        })
        request.user = AnonymousUser()
        with patch('external_auth.views.AUDIT_LOG') as mock_audit_log:
            response = shib_login(request)
        audit_log_calls = mock_audit_log.method_calls
        # reload user from db, since the view function works via db side-effects
        inactive_user = User.objects.get(id=inactive_user.id)
        self.assertIsNotNone(ExternalAuthMap.objects.get(user=inactive_user))
        self.assertTrue(inactive_user.is_active)
        self.assertIsInstance(response, HttpResponseRedirect)
        self.assertEqual(request.user, inactive_user)
        self.assertEqual(response['Location'], '/dashboard')
        # verify logging:
        self.assertEquals(len(audit_log_calls), 3)
        self._assert_shib_login_is_logged(audit_log_calls[0], log_user_string)
        method_name, args, _kwargs = audit_log_calls[2]
        self.assertEquals(method_name, 'info')
        self.assertEquals(len(args), 1)
        self.assertIn(u'Login success', args[0])
        self.assertIn(log_user_string, args[0])
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@patch.dict(settings.FEATURES, {'BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH': True, 'SQUELCH_PII_IN_LOGS': False})
def test_extauth_auto_activate_user_with_flag_no_squelch(self):
"""
Wrapper to run base_test_extauth_auto_activate_user_with_flag with {'SQUELCH_PII_IN_LOGS': False}
"""
self._base_test_extauth_auto_activate_user_with_flag(log_user_string="inactive@stanford.edu")
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@patch.dict(settings.FEATURES, {'BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH': True, 'SQUELCH_PII_IN_LOGS': True})
def test_extauth_auto_activate_user_with_flag_squelch(self):
"""
Wrapper to run base_test_extauth_auto_activate_user_with_flag with {'SQUELCH_PII_IN_LOGS': True}
"""
self._base_test_extauth_auto_activate_user_with_flag(log_user_string="user.id: 1")
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@data(*gen_all_identities())
def test_registration_form(self, identity):
"""
Tests the registration form showing up with the proper parameters.
Uses django test client for its session support
"""
client = DjangoTestClient()
# identity k/v pairs will show up in request.META
response = client.get(path='/shib-login/', data={}, follow=False, **identity)
self.assertEquals(response.status_code, 200)
mail_input_HTML = '<input class="" id="email" type="email" name="email"'
if not identity.get('mail'):
self.assertContains(response, mail_input_HTML)
else:
self.assertNotContains(response, mail_input_HTML)
sn_empty = not identity.get('sn')
given_name_empty = not identity.get('givenName')
displayname_empty = not identity.get('displayName')
fullname_input_html = '<input id="name" type="text" name="name"'
if sn_empty and given_name_empty and displayname_empty:
self.assertContains(response, fullname_input_html)
else:
self.assertNotContains(response, fullname_input_html)
    @unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
    @data(*gen_all_identities())
    def test_registration_form_submit(self, identity):
        """
        Tests user creation after the registration form that pops is submitted. If there is no shib
        ExternalAuthMap in the session, then the created user should take the username and email from the
        request.
        Uses django test client for its session support
        """
        # First we pop the registration form.  The response itself is unused:
        # the GET is issued for its session side effects (client.session is
        # reused on the create_account request below).
        client = DjangoTestClient()
        response1 = client.get(path='/shib-login/', data={}, follow=False, **identity)
        # Then we have the user answer the registration form
        # These are unicode because request.POST returns unicode
        postvars = {'email': u'post_email@stanford.edu',
                    'username': u'post_username',  # django usernames can't be unicode
                    'password': u'post_pássword',
                    'name': u'post_náme',
                    'terms_of_service': u'true',
                    'honor_code': u'true'}
        # use RequestFactory instead of TestClient here because we want access to request.user
        request2 = self.request_factory.post('/create_account', data=postvars)
        request2.session = client.session
        request2.user = AnonymousUser()
        mako_middleware_process_request(request2)
        with patch('student.views.AUDIT_LOG') as mock_audit_log:
            _response2 = create_account(request2)
        user = request2.user
        mail = identity.get('mail')
        # verify logging of login happening during account creation:
        audit_log_calls = mock_audit_log.method_calls
        self.assertEquals(len(audit_log_calls), 3)
        method_name, args, _kwargs = audit_log_calls[0]
        self.assertEquals(method_name, 'info')
        self.assertEquals(len(args), 1)
        self.assertIn(u'Login success on new account creation', args[0])
        self.assertIn(u'post_username', args[0])
        method_name, args, _kwargs = audit_log_calls[1]
        self.assertEquals(method_name, 'info')
        self.assertEquals(len(args), 2)
        self.assertIn(u'User registered with external_auth', args[0])
        self.assertEquals(u'post_username', args[1])
        method_name, args, _kwargs = audit_log_calls[2]
        self.assertEquals(method_name, 'info')
        self.assertEquals(len(args), 3)
        self.assertIn(u'Updated ExternalAuthMap for ', args[0])
        self.assertEquals(u'post_username', args[1])
        self.assertEquals(u'test_user@stanford.edu', args[2].external_id)
        # check that the created user has the right email, either taken from shib or user input
        if mail:
            self.assertEqual(user.email, mail)
            self.assertEqual(list(User.objects.filter(email=postvars['email'])), [])
            self.assertIsNotNone(User.objects.get(email=mail))  # get enforces only 1 such user
        else:
            self.assertEqual(user.email, postvars['email'])
            self.assertEqual(list(User.objects.filter(email=mail)), [])
            self.assertIsNotNone(User.objects.get(email=postvars['email']))  # get enforces only 1 such user
        # check that the created user profile has the right name, either taken from shib or user input
        profile = UserProfile.objects.get(user=user)
        sn_empty = not identity.get('sn')
        given_name_empty = not identity.get('givenName')
        displayname_empty = not identity.get('displayName')
        if displayname_empty:
            if sn_empty and given_name_empty:
                # Nothing came from shib: the form's 'name' field wins.
                self.assertEqual(profile.name, postvars['name'])
            else:
                # Name assembled from givenName/sn; the ';' list delimiter
                # must not leak into the profile name.
                self.assertEqual(profile.name, request2.session['ExternalAuthMap'].external_name)
                self.assertNotIn(u';', profile.name)
        else:
            # displayName takes precedence over givenName/sn when present.
            self.assertEqual(profile.name, request2.session['ExternalAuthMap'].external_name)
            self.assertEqual(profile.name, identity.get('displayName').decode('utf-8'))
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@SharedModuleStoreTestCase.modifies_courseware
@data(None, "", "shib:https://idp.stanford.edu/")
def test_course_specific_login_and_reg(self, domain):
"""
Tests that the correct course specific login and registration urls work for shib
"""
course = CourseFactory.create(
org='MITx',
number='999',
display_name='Robot Super Course',
user_id=self.test_user_id,
)
# Test for cases where course is found
# set domains
# temporarily set the branch to draft-preferred so we can update the course
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course.id):
course.enrollment_domain = domain
self.store.update_item(course, self.test_user_id)
# setting location to test that GET params get passed through
login_request = self.request_factory.get('/course_specific_login/MITx/999/Robot_Super_Course' +
'?course_id=MITx/999/Robot_Super_Course' +
'&enrollment_action=enroll')
_reg_request = self.request_factory.get('/course_specific_register/MITx/999/Robot_Super_Course' +
'?course_id=MITx/999/course/Robot_Super_Course' +
'&enrollment_action=enroll')
login_response = course_specific_login(login_request, 'MITx/999/Robot_Super_Course')
reg_response = course_specific_register(login_request, 'MITx/999/Robot_Super_Course')
if domain and "shib" in domain:
self.assertIsInstance(login_response, HttpResponseRedirect)
self.assertEqual(login_response['Location'],
reverse('shib-login') +
'?course_id=MITx/999/Robot_Super_Course' +
'&enrollment_action=enroll')
self.assertIsInstance(login_response, HttpResponseRedirect)
self.assertEqual(reg_response['Location'],
reverse('shib-login') +
'?course_id=MITx/999/Robot_Super_Course' +
'&enrollment_action=enroll')
else:
self.assertIsInstance(login_response, HttpResponseRedirect)
self.assertEqual(login_response['Location'],
reverse('signin_user') +
'?course_id=MITx/999/Robot_Super_Course' +
'&enrollment_action=enroll')
self.assertIsInstance(login_response, HttpResponseRedirect)
self.assertEqual(reg_response['Location'],
reverse('register_user') +
'?course_id=MITx/999/Robot_Super_Course' +
'&enrollment_action=enroll')
# Now test for non-existent course
# setting location to test that GET params get passed through
login_request = self.request_factory.get('/course_specific_login/DNE/DNE/DNE' +
'?course_id=DNE/DNE/DNE' +
'&enrollment_action=enroll')
_reg_request = self.request_factory.get('/course_specific_register/DNE/DNE/DNE' +
'?course_id=DNE/DNE/DNE/Robot_Super_Course' +
'&enrollment_action=enroll')
login_response = course_specific_login(login_request, 'DNE/DNE/DNE')
reg_response = course_specific_register(login_request, 'DNE/DNE/DNE')
self.assertIsInstance(login_response, HttpResponseRedirect)
self.assertEqual(login_response['Location'],
reverse('signin_user') +
'?course_id=DNE/DNE/DNE' +
'&enrollment_action=enroll')
self.assertIsInstance(login_response, HttpResponseRedirect)
self.assertEqual(reg_response['Location'],
reverse('register_user') +
'?course_id=DNE/DNE/DNE' +
'&enrollment_action=enroll')
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@SharedModuleStoreTestCase.modifies_courseware
def test_enrollment_limit_by_domain(self):
"""
Tests that the enrollmentDomain setting is properly limiting enrollment to those who have
the proper external auth
"""
# create 2 course, one with limited enrollment one without
shib_course = CourseFactory.create(
org='Stanford',
number='123',
display_name='Shib Only',
enrollment_domain='shib:https://idp.stanford.edu/',
user_id=self.test_user_id,
)
open_enroll_course = CourseFactory.create(
org='MITx',
number='999',
display_name='Robot Super Course',
enrollment_domain='',
user_id=self.test_user_id,
)
# create 3 kinds of students, external_auth matching shib_course, external_auth not matching, no external auth
shib_student = UserFactory.create()
shib_student.save()
extauth = ExternalAuthMap(external_id='testuser@stanford.edu',
external_email='',
external_domain='shib:https://idp.stanford.edu/',
external_credentials="",
user=shib_student)
extauth.save()
other_ext_student = UserFactory.create()
other_ext_student.username = "teststudent2"
other_ext_student.email = "teststudent2@other.edu"
other_ext_student.save()
extauth = ExternalAuthMap(external_id='testuser1@other.edu',
external_email='',
external_domain='shib:https://other.edu/',
external_credentials="",
user=other_ext_student)
extauth.save()
int_student = UserFactory.create()
int_student.username = "teststudent3"
int_student.email = "teststudent3@gmail.com"
int_student.save()
# Tests the two case for courses, limited and not
for course in [shib_course, open_enroll_course]:
for student in [shib_student, other_ext_student, int_student]:
request = self.request_factory.post('/change_enrollment')
request.POST.update({'enrollment_action': 'enroll',
'course_id': course.id.to_deprecated_string()})
request.user = student
response = change_enrollment(request)
# If course is not limited or student has correct shib extauth then enrollment should be allowed
if course is open_enroll_course or student is shib_student:
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseEnrollment.is_enrolled(student, course.id))
else:
self.assertEqual(response.status_code, 400)
self.assertFalse(CourseEnrollment.is_enrolled(student, course.id))
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@SharedModuleStoreTestCase.modifies_courseware
def test_shib_login_enrollment(self):
"""
A functionality test that a student with an existing shib login
can auto-enroll in a class with GET or POST params. Also tests the direction functionality of
the 'next' GET/POST param
"""
student = UserFactory.create()
extauth = ExternalAuthMap(external_id='testuser@stanford.edu',
external_email='',
external_domain='shib:https://idp.stanford.edu/',
external_credentials="",
internal_password="password",
user=student)
student.set_password("password")
student.save()
extauth.save()
course = CourseFactory.create(
org='Stanford',
number='123',
display_name='Shib Only',
enrollment_domain='shib:https://idp.stanford.edu/',
user_id=self.test_user_id,
)
# use django test client for sessions and url processing
# no enrollment before trying
self.assertFalse(CourseEnrollment.is_enrolled(student, course.id))
self.client.logout()
params = [
('course_id', course.id.to_deprecated_string()),
('enrollment_action', 'enroll'),
('next', '/testredirect')
]
request_kwargs = {'path': '/shib-login/',
'data': dict(params),
'follow': False,
'REMOTE_USER': 'testuser@stanford.edu',
'Shib-Identity-Provider': 'https://idp.stanford.edu/'}
response = self.client.get(**request_kwargs)
# successful login is a redirect to the URL that handles auto-enrollment
self.assertEqual(response.status_code, 302)
self.assertEqual(response['location'], 'http://testserver/account/finish_auth?{}'.format(urlencode(params)))
class ShibUtilFnTest(TestCase):
"""
Tests util functions in shib module
"""
def test__flatten_to_ascii(self):
DIACRITIC = u"àèìòùÀÈÌÒÙáéíóúýÁÉÍÓÚÝâêîôûÂÊÎÔÛãñõÃÑÕäëïöüÿÄËÏÖÜŸåÅçÇ" # pylint: disable=invalid-name
STR_DIACRI = "àèìòùÀÈÌÒÙáéíóúýÁÉÍÓÚÝâêîôûÂÊÎÔÛãñõÃÑÕäëïöüÿÄËÏÖÜŸåÅçÇ" # pylint: disable=invalid-name
FLATTENED = u"aeiouAEIOUaeiouyAEIOUYaeiouAEIOUanoANOaeiouyAEIOUYaAcC" # pylint: disable=invalid-name
self.assertEqual(_flatten_to_ascii('jasön'), 'jason') # umlaut
self.assertEqual(_flatten_to_ascii('Jason包'), 'Jason') # mandarin, so it just gets dropped
self.assertEqual(_flatten_to_ascii('abc'), 'abc') # pass through
unicode_test = _flatten_to_ascii(DIACRITIC)
self.assertEqual(unicode_test, FLATTENED)
self.assertIsInstance(unicode_test, unicode)
str_test = _flatten_to_ascii(STR_DIACRI)
self.assertEqual(str_test, FLATTENED)
self.assertIsInstance(str_test, str)
| agpl-3.0 |
petrkotas/twitter-feed | twitter_feed/mock/tweets.py | 1 | 19865 | HOME_TIMELINE = [
{
'coordinates': None,
'truncated': False,
'created_at': 'Tue Aug 28 21:16:23 +0000 2012',
'favorited': False,
'id_str': '240558470661799936',
'in_reply_to_user_id_str': None,
'entities': {
'urls': [
],
'hashtags': [
],
'user_mentions': [
]
},
'text': 'just another test',
'contributors': None,
'id': 240558470661799936,
'retweet_count': 0,
'in_reply_to_status_id_str': None,
'geo': None,
'retweeted': False,
'in_reply_to_user_id': None,
'place': None,
'source': 'OAuth Dancer Reborn',
'user': {
'name': 'OAuth Dancer',
'profile_sidebar_fill_color': 'DDEEF6',
'profile_background_tile': True,
'profile_sidebar_border_color': 'C0DEED',
'profile_image_url': 'http://a0.twimg.com/profile_images/730275945/oauth-dancer_normal.jpg',
'created_at': 'Wed Mar 03 19:37:35 +0000 2010',
'location': 'San Francisco, CA',
'follow_request_sent': False,
'id_str': '119476949',
'is_translator': False,
'profile_link_color': '0084B4',
'entities': {
'url': {
'urls': [
{
'expanded_url': None,
'url': 'http://bit.ly/oauth-dancer',
'indices': [
0,
26
],
'display_url': None
}
]
},
'description': None
},
'default_profile': False,
'url': 'http://bit.ly/oauth-dancer',
'contributors_enabled': False,
'favourites_count': 7,
'utc_offset': None,
'profile_image_url_https': 'https://si0.twimg.com/profile_images/730275945/oauth-dancer_normal.jpg',
'id': 119476949,
'listed_count': 1,
'profile_use_background_image': True,
'profile_text_color': '333333',
'followers_count': 28,
'lang': 'en',
'protected': False,
'geo_enabled': True,
'notifications': False,
'description': '',
'profile_background_color': 'C0DEED',
'verified': False,
'time_zone': None,
'profile_background_image_url_https': 'https://si0.twimg.com/profile_background_images/80151733/oauth-dance.png',
'statuses_count': 166,
'profile_background_image_url': 'http://a0.twimg.com/profile_background_images/80151733/oauth-dance.png',
'default_profile_image': False,
'friends_count': 14,
'following': False,
'show_all_inline_media': False,
'screen_name': 'oauth_dancer'
},
'in_reply_to_screen_name': None,
'in_reply_to_status_id': None
},
{
'coordinates': {
'coordinates': [
-122.25831,
37.871609
],
'type': 'Point'
},
'truncated': False,
'created_at': 'Tue Aug 28 21:08:15 +0000 2012',
'favorited': False,
'id_str': '240556426106372096',
'in_reply_to_user_id_str': None,
'entities': {
'urls': [
{
'expanded_url': 'http://blogs.ischool.berkeley.edu/i290-abdt-s12/',
'url': 'http://t.co/bfj7zkDJ',
'indices': [
79,
99
],
'display_url': 'blogs.ischool.berkeley.edu/i290-abdt-s12/'
}
],
'hashtags': [
],
'user_mentions': [
{
'name': 'Cal',
'id_str': '17445752',
'id': 17445752,
'indices': [
60,
64
],
'screen_name': 'Cal'
},
{
'name': 'Othman Laraki',
'id_str': '20495814',
'id': 20495814,
'indices': [
70,
77
],
'screen_name': 'othman'
}
]
},
'text': 'lecturing at the \'analyzing big data with twitter\' class at @cal with @othman http://t.co/bfj7zkDJ',
'contributors': None,
'id': 240556426106372096,
'retweet_count': 3,
'in_reply_to_status_id_str': None,
'geo': {
'coordinates': [
37.871609,
-122.25831
],
'type': 'Point'
},
'retweeted': False,
'possibly_sensitive': False,
'in_reply_to_user_id': None,
'place': {
'name': 'Berkeley',
'country_code': 'US',
'country': 'United States',
'attributes': {
},
'url': 'http://api.twitter.com/1/geo/id/5ef5b7f391e30aff.json',
'id': '5ef5b7f391e30aff',
'bounding_box': {
'coordinates': [
[
[
-122.367781,
37.835727
],
[
-122.234185,
37.835727
],
[
-122.234185,
37.905824
],
[
-122.367781,
37.905824
]
]
],
'type': 'Polygon'
},
'full_name': 'Berkeley, CA',
'place_type': 'city'
},
'source': 'Safari on iOS',
'user': {
'name': 'Raffi Krikorian',
'profile_sidebar_fill_color': 'DDEEF6',
'profile_background_tile': False,
'profile_sidebar_border_color': 'C0DEED',
'profile_image_url': 'http://a0.twimg.com/profile_images/1270234259/raffi-headshot-casual_normal.png',
'created_at': 'Sun Aug 19 14:24:06 +0000 2007',
'location': 'San Francisco, California',
'follow_request_sent': False,
'id_str': '8285392',
'is_translator': False,
'profile_link_color': '0084B4',
'entities': {
'url': {
'urls': [
{
'expanded_url': 'http://about.me/raffi.krikorian',
'url': 'http://t.co/eNmnM6q',
'indices': [
0,
19
],
'display_url': 'about.me/raffi.krikorian'
}
]
},
'description': {
'urls': [
]
}
},
'default_profile': True,
'url': 'http://t.co/eNmnM6q',
'contributors_enabled': False,
'favourites_count': 724,
'utc_offset': -28800,
'profile_image_url_https': 'https://si0.twimg.com/profile_images/1270234259/raffi-headshot-casual_normal.png',
'id': 8285392,
'listed_count': 619,
'profile_use_background_image': True,
'profile_text_color': '333333',
'followers_count': 18752,
'lang': 'en',
'protected': False,
'geo_enabled': True,
'notifications': False,
'description': 'Director of @twittereng\'s Platform Services. I break things.',
'profile_background_color': 'C0DEED',
'verified': False,
'time_zone': 'Pacific Time (US & Canada)',
'profile_background_image_url_https': 'https://si0.twimg.com/images/themes/theme1/bg.png',
'statuses_count': 5007,
'profile_background_image_url': 'http://a0.twimg.com/images/themes/theme1/bg.png',
'default_profile_image': False,
'friends_count': 701,
'following': True,
'show_all_inline_media': True,
'screen_name': 'raffi'
},
'in_reply_to_screen_name': None,
'in_reply_to_status_id': None
},
{
'coordinates': None,
'truncated': False,
'created_at': 'Tue Aug 28 19:59:34 +0000 2012',
'favorited': False,
'id_str': '240539141056638977',
'in_reply_to_user_id_str': None,
'entities': {
'urls': [
],
'hashtags': [
],
'user_mentions': [
]
},
'text': 'You\'d be right more often if you thought you were wrong.',
'contributors': None,
'id': 240539141056638977,
'retweet_count': 1,
'in_reply_to_status_id_str': None,
'geo': None,
'retweeted': False,
'in_reply_to_user_id': None,
'place': None,
'source': 'web',
'user': {
'name': 'Taylor Singletary',
'profile_sidebar_fill_color': 'FBFBFB',
'profile_background_tile': True,
'profile_sidebar_border_color': '000000',
'profile_image_url': 'http://a0.twimg.com/profile_images/2546730059/f6a8zq58mg1hn0ha8vie_normal.jpeg',
'created_at': 'Wed Mar 07 22:23:19 +0000 2007',
'location': 'San Francisco, CA',
'follow_request_sent': False,
'id_str': '819797',
'is_translator': False,
'profile_link_color': 'c71818',
'entities': {
'url': {
'urls': [
{
'expanded_url': 'http://www.rebelmouse.com/episod/',
'url': 'http://t.co/Lxw7upbN',
'indices': [
0,
20
],
'display_url': 'rebelmouse.com/episod/'
}
]
},
'description': {
'urls': [
]
}
},
'default_profile': False,
'url': 'http://t.co/Lxw7upbN',
'contributors_enabled': False,
'favourites_count': 15990,
'utc_offset': -28800,
'profile_image_url_https': 'https://si0.twimg.com/profile_images/2546730059/f6a8zq58mg1hn0ha8vie_normal.jpeg',
'id': 819797,
'listed_count': 340,
'profile_use_background_image': True,
'profile_text_color': 'D20909',
'followers_count': 7126,
'lang': 'en',
'protected': False,
'geo_enabled': True,
'notifications': False,
'description': 'Reality Technician, Twitter API team, synthesizer enthusiast; a most excellent adventure in timelines. I know it\'s hard to believe in something you can\'t see.',
'profile_background_color': '000000',
'verified': False,
'time_zone': 'Pacific Time (US & Canada)',
'profile_background_image_url_https': 'https://si0.twimg.com/profile_background_images/643655842/hzfv12wini4q60zzrthg.png',
'statuses_count': 18076,
'profile_background_image_url': 'http://a0.twimg.com/profile_background_images/643655842/hzfv12wini4q60zzrthg.png',
'default_profile_image': False,
'friends_count': 5444,
'following': True,
'show_all_inline_media': True,
'screen_name': 'episod'
},
'in_reply_to_screen_name': None,
'in_reply_to_status_id': None
}
]
USER_TIMELINE = [
{
'coordinates': None,
'favorited': False,
'truncated': False,
'created_at': 'Wed Aug 29 17:12:58 +0000 2012',
'id_str': '240859602684612608',
'entities': {
'urls': [
{
'expanded_url': '/blog/twitter-certified-products',
'url': 'https://t.co/MjJ8xAnT',
'indices': [
52,
73
],
'display_url': 'dev.twitter.com/blog/twitter-c\u2026'
}
],
'hashtags': [
],
'user_mentions': [
]
},
'in_reply_to_user_id_str': None,
'contributors': None,
'text': 'Introducing the Twitter Certified Products Program: https://t.co/MjJ8xAnT',
'retweet_count': 121,
'in_reply_to_status_id_str': None,
'id': 240859602684612608,
'geo': None,
'retweeted': False,
'possibly_sensitive': False,
'in_reply_to_user_id': None,
'place': None,
'user': {
'profile_sidebar_fill_color': 'DDEEF6',
'profile_sidebar_border_color': 'C0DEED',
'profile_background_tile': False,
'name': 'Twitter API',
'profile_image_url': 'http://a0.twimg.com/profile_images/2284174872/7df3h38zabcvjylnyfe3_normal.png',
'created_at': 'Wed May 23 06:01:13 +0000 2007',
'location': 'San Francisco, CA',
'follow_request_sent': False,
'profile_link_color': '0084B4',
'is_translator': False,
'id_str': '6253282',
'entities': {
'url': {
'urls': [
{
'expanded_url': None,
'url': '',
'indices': [
0,
22
]
}
]
},
'description': {
'urls': [
]
}
},
'default_profile': True,
'contributors_enabled': True,
'favourites_count': 24,
'url': '',
'profile_image_url_https': 'https://si0.twimg.com/profile_images/2284174872/7df3h38zabcvjylnyfe3_normal.png',
'utc_offset': -28800,
'id': 6253282,
'profile_use_background_image': True,
'listed_count': 10775,
'profile_text_color': '333333',
'lang': 'en',
'followers_count': 1212864,
'protected': False,
'notifications': None,
'profile_background_image_url_https': 'https://si0.twimg.com/images/themes/theme1/bg.png',
'profile_background_color': 'C0DEED',
'verified': True,
'geo_enabled': True,
'time_zone': 'Pacific Time (US & Canada)',
'description': 'The Real Twitter API. I tweet about API changes, service issues and happily answer questions about Twitter and our API. Don\'t get an answer? It\'s on my website.',
'default_profile_image': False,
'profile_background_image_url': 'http://a0.twimg.com/images/themes/theme1/bg.png',
'statuses_count': 3333,
'friends_count': 31,
'following': None,
'show_all_inline_media': False,
'screen_name': 'twitterapi'
},
'in_reply_to_screen_name': None,
'source': 'YoruFukurou',
'in_reply_to_status_id': None
},
{
'coordinates': None,
'favorited': False,
'truncated': False,
'created_at': 'Sat Aug 25 17:26:51 +0000 2012',
'id_str': '239413543487819778',
'entities': {
'urls': [
{
'expanded_url': '/issues/485',
'url': 'https://t.co/p5bOzH0k',
'indices': [
97,
118
],
'display_url': 'dev.twitter.com/issues/485'
}
],
'hashtags': [
],
'user_mentions': [
]
},
'in_reply_to_user_id_str': None,
'contributors': None,
'text': 'We are working to resolve issues with application management & logging in to the dev portal: https://t.co/p5bOzH0k ^TS',
'retweet_count': 105,
'in_reply_to_status_id_str': None,
'id': 239413543487819778,
'geo': None,
'retweeted': False,
'possibly_sensitive': False,
'in_reply_to_user_id': None,
'place': None,
'user': {
'profile_sidebar_fill_color': 'DDEEF6',
'profile_sidebar_border_color': 'C0DEED',
'profile_background_tile': False,
'name': 'Twitter API',
'profile_image_url': 'http://a0.twimg.com/profile_images/2284174872/7df3h38zabcvjylnyfe3_normal.png',
'created_at': 'Wed May 23 06:01:13 +0000 2007',
'location': 'San Francisco, CA',
'follow_request_sent': False,
'profile_link_color': '0084B4',
'is_translator': False,
'id_str': '6253282',
'entities': {
'url': {
'urls': [
{
'expanded_url': None,
'url': '',
'indices': [
0,
22
]
}
]
},
'description': {
'urls': [
]
}
},
'default_profile': True,
'contributors_enabled': True,
'favourites_count': 24,
'url': '',
'profile_image_url_https': 'https://si0.twimg.com/profile_images/2284174872/7df3h38zabcvjylnyfe3_normal.png',
'utc_offset': -28800,
'id': 6253282,
'profile_use_background_image': True,
'listed_count': 10775,
'profile_text_color': '333333',
'lang': 'en',
'followers_count': 1212864,
'protected': False,
'notifications': None,
'profile_background_image_url_https': 'https://si0.twimg.com/images/themes/theme1/bg.png',
'profile_background_color': 'C0DEED',
'verified': True,
'geo_enabled': True,
'time_zone': 'Pacific Time (US & Canada)',
'description': 'The Real Twitter API. I tweet about API changes, service issues and happily answer questions about Twitter and our API. Don\'t get an answer? It\'s on my website.',
'default_profile_image': False,
'profile_background_image_url': 'http://a0.twimg.com/images/themes/theme1/bg.png',
'statuses_count': 3333,
'friends_count': 31,
'following': None,
'show_all_inline_media': False,
'screen_name': 'twitterapi'
},
'in_reply_to_screen_name': None,
'source': 'YoruFukurou',
'in_reply_to_status_id': None
}
]
| mit |
radicalbit/ambari | ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py | 1 | 5636 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import ambari_simplejson as json
from ambari_jinja2 import Environment as JinjaEnvironment
from resource_management.core.logger import Logger
from resource_management.core.resources.system import Directory, File
from resource_management.core.source import InlineTemplate, Template
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.fcntl_based_process_lock import FcntlBasedProcessLock
from resource_management.libraries.resources.xml_config import XmlConfig
from resource_management.libraries.script import Script
def setup_stack_symlinks(struct_out_file):
"""
Invokes <stack-selector-tool> set all against a calculated fully-qualified, "normalized" version based on a
stack version, such as "2.3". This should always be called after a component has been
installed to ensure that all HDP pointers are correct. The stack upgrade logic does not
interact with this since it's done via a custom command and will not trigger this hook.
:return:
"""
import params
if params.upgrade_suspended:
Logger.warning("Skipping running stack-selector-tool because there is a suspended upgrade")
return
if params.host_sys_prepped:
Logger.warning("Skipping running stack-selector-tool becase this is a sys_prepped host. This may cause symlink pointers not to be created for HDP componets installed later on top of an already sys_prepped host.")
return
# get the packages which the stack-select tool should be used on
stack_packages = stack_select.get_packages(stack_select.PACKAGE_SCOPE_INSTALL)
if stack_packages is None:
return
json_version = load_version(struct_out_file)
if not json_version:
Logger.info("There is no advertised version for this component stored in {0}".format(struct_out_file))
return
# On parallel command execution this should be executed by a single process at a time.
with FcntlBasedProcessLock(params.stack_select_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
for package in stack_packages:
stack_select.select(package, json_version)
def setup_config():
import params
stackversion = params.stack_version_unformatted
Logger.info("FS Type: {0}".format(params.dfs_type))
is_hadoop_conf_dir_present = False
if hasattr(params, "hadoop_conf_dir") and params.hadoop_conf_dir is not None and os.path.exists(params.hadoop_conf_dir):
is_hadoop_conf_dir_present = True
else:
Logger.warning("Parameter hadoop_conf_dir is missing or directory does not exist. This is expected if this host does not have any Hadoop components.")
if is_hadoop_conf_dir_present and (params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS'):
# create core-site only if the hadoop config diretory exists
XmlConfig("core-site.xml",
conf_dir=params.hadoop_conf_dir,
configurations=params.config['configurations']['core-site'],
configuration_attributes=params.config['configuration_attributes']['core-site'],
owner=params.hdfs_user,
group=params.user_group,
only_if=format("ls {hadoop_conf_dir}"))
Directory(params.logsearch_logfeeder_conf,
mode=0755,
cd_access='a',
create_parents=True
)
if params.logsearch_config_file_exists:
File(format("{logsearch_logfeeder_conf}/" + params.logsearch_config_file_name),
content=Template(params.logsearch_config_file_path,extra_imports=[default])
)
else:
Logger.warning('No logsearch configuration exists at ' + params.logsearch_config_file_path)
def load_version(struct_out_file):
"""
Load version from file. Made a separate method for testing
"""
json_version = None
try:
if os.path.exists(struct_out_file):
with open(struct_out_file, 'r') as fp:
json_info = json.load(fp)
json_version = json_info['version']
except:
pass
return json_version
def link_configs(struct_out_file):
"""
Links configs, only on a fresh install of HDP-2.3 and higher
"""
import params
json_version = load_version(struct_out_file)
if not json_version:
Logger.info("Could not load 'version' from {0}".format(struct_out_file))
return
# On parallel command execution this should be executed by a single process at a time.
with FcntlBasedProcessLock(params.link_configs_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
for k, v in conf_select.get_package_dirs().iteritems():
conf_select.convert_conf_directories_to_symlinks(k, json_version, v) | apache-2.0 |
kylerbrown/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 209 | 11733 | """
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
Dummy classifier to test recursive feature ellimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_set_params():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
y_pred = rfe.fit(X, y).predict(X)
clf = SVC()
with warnings.catch_warnings(record=True):
# estimator_params is deprecated
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'})
y_pred2 = rfe.fit(X, y).predict(X)
assert_array_equal(y_pred, y_pred2)
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe_deprecation_estimator_params():
deprecation_message = ("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.")
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
assert_warns_message(DeprecationWarning, deprecation_message,
RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
assert_warns_message(DeprecationWarning, deprecation_message,
RFECV(estimator=SVC(), step=1, cv=5,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
# All the noisy variable were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_estimator_tags():
rfe = RFE(SVC(kernel='linear'))
assert_equal(rfe._estimator_type, "classifier")
# make sure that cross-validation is stratified
iris = load_iris()
score = cross_val_score(rfe, iris.data, iris.target)
assert_greater(score.min(), .7)
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
    # In RFE, 'number_of_subsets_of_features'
    # = the number of iterations in '_fit'
    # = max(ranking_)
    # = 1 + (n_features + step - n_features_to_select - 1) // step
    # After optimization #4534, this number
    # = 1 + np.ceil((n_features - n_features_to_select) / float(step))
    # Both closed forms must agree; refer to issues #4534 and #3824.
    def n_iterations_floor(n_features, n_keep, step):
        return 1 + ((n_features + step - n_keep - 1) // step)

    def n_iterations_ceil(n_features, n_keep, step):
        return 1 + np.ceil((n_features - n_keep) / float(step))

    # RFE: one case where (n_features - n_keep) divides evenly by step,
    # one where it does not.
    for n_features, n_keep, step in ((11, 3, 2), (11, 3, 3)):
        rng = check_random_state(43)
        X = rng.normal(size=(100, n_features))
        y = rng.rand(100).round()
        rfe = RFE(estimator=SVC(kernel="linear"),
                  n_features_to_select=n_keep, step=step)
        rfe.fit(X, y)
        # The iteration count also equals the maximum of ranking_.
        top_rank = np.max(rfe.ranking_)
        assert_equal(top_rank, n_iterations_floor(n_features, n_keep, step))
        assert_equal(top_rank, n_iterations_ceil(n_features, n_keep, step))

    # In RFECV, 'fit' calls 'RFE._fit'; the iteration count equals the size
    # of grid_scores_ (the number of loop iterations before #4534).  With
    # n_features_to_select fixed at 1, cover n_features - 1 both divisible
    # and not divisible by step.
    n_keep = 1
    for n_features, step in ((11, 2), (10, 2)):
        rng = check_random_state(43)
        X = rng.normal(size=(100, n_features))
        y = rng.rand(100).round()
        rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
        rfecv.fit(X, y)
        n_scores = rfecv.grid_scores_.shape[0]
        assert_equal(n_scores, n_iterations_floor(n_features, n_keep, step))
        assert_equal(n_scores, n_iterations_ceil(n_features, n_keep, step))
| bsd-3-clause |
rlr/fjord | vendor/packages/translate-toolkit/translate/storage/xml_extract/test_xpath_breadcrumb.py | 28 | 1256 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2002-2006 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
import xpath_breadcrumb
def test_breadcrumb():
    """Exercise XPathBreadcrumb through a nested open/close tag sequence."""
    crumb = xpath_breadcrumb.XPathBreadcrumb()
    assert crumb.xpath == u''
    # Opening a tag appends a zero-based, per-name indexed path component.
    crumb.start_tag(u'a')
    assert crumb.xpath == u'a[0]'
    crumb.start_tag(u'b')
    assert crumb.xpath == u'a[0]/b[0]'
    crumb.end_tag()
    assert crumb.xpath == u'a[0]'
    # A sibling <b> under the same <a> gets the next index.
    crumb.start_tag(u'b')
    assert crumb.xpath == u'a[0]/b[1]'
    crumb.end_tag()
    assert crumb.xpath == u'a[0]'
    crumb.end_tag()
    assert crumb.xpath == u''
    # A second top-level <a> likewise advances its own counter.
    crumb.start_tag(u'a')
    assert crumb.xpath == u'a[1]'
| bsd-3-clause |
ShiYw/Sigil | 3rdparty/python/Lib/test/test_strptime.py | 84 | 26461 | """PyUnit testing against strptime"""
import unittest
import time
import locale
import re
import sys
from test import support
from datetime import date as datetime_date
import _strptime
class getlang_Tests(unittest.TestCase):
    """Test _strptime._getlang."""

    def test_basic(self):
        # _getlang must simply mirror the current LC_TIME locale setting.
        expected = locale.getlocale(locale.LC_TIME)
        self.assertEqual(_strptime._getlang(), expected)
class LocaleTime_Tests(unittest.TestCase):
    """Tests for _strptime.LocaleTime.

    All values are lower-cased when stored in LocaleTime, so make sure to
    compare values after running ``lower`` on them.
    """

    def setUp(self):
        """Create time tuple based on current time."""
        self.time_tuple = time.localtime()
        self.LT_ins = _strptime.LocaleTime()

    def compare_against_time(self, testing, directive, tuple_position,
                             error_msg):
        """Helper method that tests testing against directive based on the
        tuple_position of time_tuple.  Uses error_msg as error message.
        """
        # strftime output is lower-cased to match LocaleTime's storage.
        strftime_output = time.strftime(directive, self.time_tuple).lower()
        comparison = testing[self.time_tuple[tuple_position]]
        # The value must be present AND stored at the right tuple index.
        self.assertIn(strftime_output, testing,
                      "%s: not found in tuple" % error_msg)
        self.assertEqual(comparison, strftime_output,
                         "%s: position within tuple incorrect; %s != %s" %
                         (error_msg, comparison, strftime_output))

    def test_weekday(self):
        # Make sure that full and abbreviated weekday names are correct in
        # both string and position with tuple
        self.compare_against_time(self.LT_ins.f_weekday, '%A', 6,
                                  "Testing of full weekday name failed")
        self.compare_against_time(self.LT_ins.a_weekday, '%a', 6,
                                  "Testing of abbreviated weekday name failed")

    def test_month(self):
        # Test full and abbreviated month names; both string and position
        # within the tuple
        self.compare_against_time(self.LT_ins.f_month, '%B', 1,
                                  "Testing against full month name failed")
        self.compare_against_time(self.LT_ins.a_month, '%b', 1,
                                  "Testing against abbreviated month name failed")

    def test_am_pm(self):
        # Make sure AM/PM representation done properly
        strftime_output = time.strftime("%p", self.time_tuple).lower()
        self.assertIn(strftime_output, self.LT_ins.am_pm,
                      "AM/PM representation not in tuple")
        # Index 0 of am_pm holds the AM string, index 1 the PM string.
        if self.time_tuple[3] < 12: position = 0
        else: position = 1
        self.assertEqual(self.LT_ins.am_pm[position], strftime_output,
                         "AM/PM representation in the wrong position within the tuple")

    def test_timezone(self):
        # Make sure timezone is correct
        timezone = time.strftime("%Z", self.time_tuple).lower()
        # %Z may legitimately be empty (e.g. under gmtime); only check when set.
        if timezone:
            self.assertTrue(timezone in self.LT_ins.timezone[0] or
                            timezone in self.LT_ins.timezone[1],
                            "timezone %s not found in %s" %
                            (timezone, self.LT_ins.timezone))

    def test_date_time(self):
        # Check that LC_date_time, LC_date, and LC_time are correct
        # the magic date is used so as to not have issues with %c when day of
        # the month is a single digit and has a leading space. This is not an
        # issue since strptime still parses it correctly. The problem is
        # testing these directives for correctness by comparing strftime
        # output.
        magic_date = (1999, 3, 17, 22, 44, 55, 2, 76, 0)
        strftime_output = time.strftime("%c", magic_date)
        self.assertEqual(time.strftime(self.LT_ins.LC_date_time, magic_date),
                         strftime_output, "LC_date_time incorrect")
        strftime_output = time.strftime("%x", magic_date)
        self.assertEqual(time.strftime(self.LT_ins.LC_date, magic_date),
                         strftime_output, "LC_date incorrect")
        strftime_output = time.strftime("%X", magic_date)
        self.assertEqual(time.strftime(self.LT_ins.LC_time, magic_date),
                         strftime_output, "LC_time incorrect")
        # A locale with empty AM/PM strings must still yield a usable LC_time.
        LT = _strptime.LocaleTime()
        LT.am_pm = ('', '')
        self.assertTrue(LT.LC_time, "LocaleTime's LC directives cannot handle "
                                    "empty strings")

    def test_lang(self):
        # Make sure lang is set to what _getlang() returns
        # Assuming locale has not changed between now and when self.LT_ins was created
        self.assertEqual(self.LT_ins.lang, _strptime._getlang())
class TimeRETests(unittest.TestCase):
    """Tests for TimeRE."""

    def setUp(self):
        """Construct generic TimeRE object."""
        self.time_re = _strptime.TimeRE()
        self.locale_time = _strptime.LocaleTime()

    def test_pattern(self):
        # Test TimeRE.pattern: the generated regex must embed the locale's
        # weekday names and the per-directive sub-pattern.
        pattern_string = self.time_re.pattern(r"%a %A %d")
        self.assertTrue(pattern_string.find(self.locale_time.a_weekday[2]) != -1,
                        "did not find abbreviated weekday in pattern string '%s'" %
                        pattern_string)
        self.assertTrue(pattern_string.find(self.locale_time.f_weekday[4]) != -1,
                        "did not find full weekday in pattern string '%s'" %
                        pattern_string)
        self.assertTrue(pattern_string.find(self.time_re['d']) != -1,
                        "did not find 'd' directive pattern string '%s'" %
                        pattern_string)

    def test_pattern_escaping(self):
        # Make sure any characters in the format string that might be taken as
        # regex syntax is escaped.
        pattern_string = self.time_re.pattern("\d+")
        self.assertIn(r"\\d\+", pattern_string,
                      "%s does not have re characters escaped properly" %
                      pattern_string)

    def test_compile(self):
        # Check that compiled regex is correct
        found = self.time_re.compile(r"%A").match(self.locale_time.f_weekday[6])
        self.assertTrue(found and found.group('A') == self.locale_time.f_weekday[6],
                        "re object for '%A' failed")
        compiled = self.time_re.compile(r"%a %b")
        found = compiled.match("%s %s" % (self.locale_time.a_weekday[4],
                               self.locale_time.a_month[4]))
        self.assertTrue(found,
                        "Match failed with '%s' regex and '%s' string" %
                        (compiled.pattern, "%s %s" % (self.locale_time.a_weekday[4],
                                                      self.locale_time.a_month[4])))
        self.assertTrue(found.group('a') == self.locale_time.a_weekday[4] and
                        found.group('b') == self.locale_time.a_month[4],
                        "re object couldn't find the abbreviated weekday month in "
                        "'%s' using '%s'; group 'a' = '%s', group 'b' = %s'" %
                        (found.string, found.re.pattern, found.group('a'),
                         found.group('b')))
        # Every supported directive must compile and match today's strftime
        # output for that same directive.
        for directive in ('a','A','b','B','c','d','H','I','j','m','M','p','S',
                          'U','w','W','x','X','y','Y','Z','%'):
            compiled = self.time_re.compile("%" + directive)
            found = compiled.match(time.strftime("%" + directive))
            self.assertTrue(found, "Matching failed on '%s' using '%s' regex" %
                            (time.strftime("%" + directive),
                             compiled.pattern))

    def test_blankpattern(self):
        # Make sure when tuple or something has no values no regex is generated.
        # Fixes bug #661354
        test_locale = _strptime.LocaleTime()
        test_locale.timezone = (frozenset(), frozenset())
        self.assertEqual(_strptime.TimeRE(test_locale).pattern("%Z"), '',
                         "with timezone == ('',''), TimeRE().pattern('%Z') != ''")

    def test_matching_with_escapes(self):
        # Make sure a format that requires escaping of characters works
        compiled_re = self.time_re.compile("\w+ %m")
        found = compiled_re.match("\w+ 10")
        self.assertTrue(found, "Escaping failed of format '\w+ 10'")

    def test_locale_data_w_regex_metacharacters(self):
        # Check that if locale data contains regex metacharacters they are
        # escaped properly.
        # Discovered by bug #1039270 .
        locale_time = _strptime.LocaleTime()
        # NOTE(review): frozenset("Tokyo (daylight time)") builds a set of
        # individual characters, not a one-element set — presumably a missing
        # inner tuple, but harmless for this test; confirm intent upstream.
        locale_time.timezone = (frozenset(("utc", "gmt",
                                           "Tokyo (standard time)")),
                                frozenset("Tokyo (daylight time)"))
        time_re = _strptime.TimeRE(locale_time)
        self.assertTrue(time_re.compile("%Z").match("Tokyo (standard time)"),
                        "locale data that contains regex metacharacters is not"
                        " properly escaped")

    def test_whitespace_substitution(self):
        # When pattern contains whitespace, make sure it is taken into account
        # so as to not allow to subpatterns to end up next to each other and
        # "steal" characters from each other.
        pattern = self.time_re.pattern('%j %H')
        self.assertFalse(re.match(pattern, "180"))
        self.assertTrue(re.match(pattern, "18 0"))
class StrptimeTests(unittest.TestCase):
    """Tests for _strptime.strptime."""

    def setUp(self):
        """Create testing time tuple."""
        self.time_tuple = time.gmtime()

    def test_ValueError(self):
        # Make sure ValueError is raised when match fails or format is bad
        self.assertRaises(ValueError, _strptime._strptime_time, data_string="%d",
                          format="%A")
        for bad_format in ("%", "% ", "%e"):
            try:
                _strptime._strptime_time("2005", bad_format)
            except ValueError:
                continue
            except Exception as err:
                self.fail("'%s' raised %s, not ValueError" %
                          (bad_format, err.__class__.__name__))
            else:
                self.fail("'%s' did not raise ValueError" % bad_format)

    def test_strptime_exception_context(self):
        # check that this doesn't chain exceptions needlessly (see #17572)
        with self.assertRaises(ValueError) as e:
            _strptime._strptime_time('', '%D')
        self.assertIs(e.exception.__suppress_context__, True)
        # additional check for IndexError branch (issue #19545)
        with self.assertRaises(ValueError) as e:
            _strptime._strptime_time('19', '%Y %')
        self.assertIs(e.exception.__suppress_context__, True)

    def test_unconverteddata(self):
        # Check ValueError is raised when there is unconverted data
        self.assertRaises(ValueError, _strptime._strptime_time, "10 12", "%m")

    def helper(self, directive, position):
        """Helper fxn in testing.

        Round-trips the current time through strftime/strptime for one
        directive and checks the struct_time field at *position* survives.
        """
        strf_output = time.strftime("%" + directive, self.time_tuple)
        strp_output = _strptime._strptime_time(strf_output, "%" + directive)
        self.assertTrue(strp_output[position] == self.time_tuple[position],
                        "testing of '%s' directive failed; '%s' -> %s != %s" %
                        (directive, strf_output, strp_output[position],
                         self.time_tuple[position]))

    def test_year(self):
        # Test that the year is handled properly
        for directive in ('y', 'Y'):
            self.helper(directive, 0)
        # Must also make sure %y values are correct for bounds set by Open Group
        # (69-99 -> 19xx, 00-68 -> 20xx).
        for century, bounds in ((1900, ('69', '99')), (2000, ('00', '68'))):
            for bound in bounds:
                strp_output = _strptime._strptime_time(bound, '%y')
                expected_result = century + int(bound)
                self.assertTrue(strp_output[0] == expected_result,
                                "'y' test failed; passed in '%s' "
                                "and returned '%s'" % (bound, strp_output[0]))

    def test_month(self):
        # Test for month directives
        for directive in ('B', 'b', 'm'):
            self.helper(directive, 1)

    def test_day(self):
        # Test for day directives
        self.helper('d', 2)

    def test_hour(self):
        # Test hour directives
        self.helper('H', 3)
        strf_output = time.strftime("%I %p", self.time_tuple)
        strp_output = _strptime._strptime_time(strf_output, "%I %p")
        self.assertTrue(strp_output[3] == self.time_tuple[3],
                        "testing of '%%I %%p' directive failed; '%s' -> %s != %s" %
                        (strf_output, strp_output[3], self.time_tuple[3]))

    def test_minute(self):
        # Test minute directives
        self.helper('M', 4)

    def test_second(self):
        # Test second directives
        self.helper('S', 5)

    def test_fraction(self):
        # Test microseconds via the private _strptime (which also returns
        # the fractional part, unlike _strptime_time).
        import datetime
        d = datetime.datetime(2012, 12, 20, 12, 34, 56, 78987)
        tup, frac = _strptime._strptime(str(d), format="%Y-%m-%d %H:%M:%S.%f")
        self.assertEqual(frac, d.microsecond)

    def test_weekday(self):
        # Test weekday directives
        for directive in ('A', 'a', 'w'):
            self.helper(directive,6)

    def test_julian(self):
        # Test julian directives
        self.helper('j', 7)

    def test_timezone(self):
        # Test timezone directives.
        # When gmtime() is used with %Z, entire result of strftime() is empty.
        # Check for equal timezone names deals with bad locale info when this
        # occurs; first found in FreeBSD 4.4.
        strp_output = _strptime._strptime_time("UTC", "%Z")
        self.assertEqual(strp_output.tm_isdst, 0)
        strp_output = _strptime._strptime_time("GMT", "%Z")
        self.assertEqual(strp_output.tm_isdst, 0)
        time_tuple = time.localtime()
        strf_output = time.strftime("%Z")  #UTC does not have a timezone
        strp_output = _strptime._strptime_time(strf_output, "%Z")
        locale_time = _strptime.LocaleTime()
        # tm_isdst (index 8) can only be determined when the standard and
        # daylight names differ; otherwise strptime must report -1 (unknown).
        if time.tzname[0] != time.tzname[1] or not time.daylight:
            self.assertTrue(strp_output[8] == time_tuple[8],
                            "timezone check failed; '%s' -> %s != %s" %
                            (strf_output, strp_output[8], time_tuple[8]))
        else:
            self.assertTrue(strp_output[8] == -1,
                            "LocaleTime().timezone has duplicate values and "
                            "time.daylight but timezone value not set to -1")

    def test_bad_timezone(self):
        # Explicitly test possibility of bad timezone;
        # when time.tzname[0] == time.tzname[1] and time.daylight
        tz_name = time.tzname[0]
        if tz_name.upper() in ("UTC", "GMT"):
            self.skipTest('need non-UTC/GMT timezone')
        # Monkey-patch time.tzname/time.daylight; restore in finally so other
        # tests are not affected even on failure.
        try:
            original_tzname = time.tzname
            original_daylight = time.daylight
            time.tzname = (tz_name, tz_name)
            time.daylight = 1
            tz_value = _strptime._strptime_time(tz_name, "%Z")[8]
            self.assertEqual(tz_value, -1,
                "%s lead to a timezone value of %s instead of -1 when "
                "time.daylight set to %s and passing in %s" %
                (time.tzname, tz_value, time.daylight, tz_name))
        finally:
            time.tzname = original_tzname
            time.daylight = original_daylight

    def test_date_time(self):
        # Test %c directive
        for position in range(6):
            self.helper('c', position)

    def test_date(self):
        # Test %x directive
        for position in range(0,3):
            self.helper('x', position)

    def test_time(self):
        # Test %X directive
        for position in range(3,6):
            self.helper('X', position)

    def test_percent(self):
        # Make sure % signs are handled properly
        strf_output = time.strftime("%m %% %Y", self.time_tuple)
        strp_output = _strptime._strptime_time(strf_output, "%m %% %Y")
        self.assertTrue(strp_output[0] == self.time_tuple[0] and
                        strp_output[1] == self.time_tuple[1],
                        "handling of percent sign failed")

    def test_caseinsensitive(self):
        # Should handle names case-insensitively.
        strf_output = time.strftime("%B", self.time_tuple)
        self.assertTrue(_strptime._strptime_time(strf_output.upper(), "%B"),
                        "strptime does not handle ALL-CAPS names properly")
        self.assertTrue(_strptime._strptime_time(strf_output.lower(), "%B"),
                        "strptime does not handle lowercase names properly")
        self.assertTrue(_strptime._strptime_time(strf_output.capitalize(), "%B"),
                        "strptime does not handle capword names properly")

    def test_defaults(self):
        # Default return value should be (1900, 1, 1, 0, 0, 0, 0, 1, 0)
        # (the final field, tm_isdst, actually defaults to -1 == unknown).
        defaults = (1900, 1, 1, 0, 0, 0, 0, 1, -1)
        strp_output = _strptime._strptime_time('1', '%m')
        self.assertTrue(strp_output == defaults,
                        "Default values for strptime() are incorrect;"
                        " %s != %s" % (strp_output, defaults))

    def test_escaping(self):
        # Make sure all characters that have regex significance are escaped.
        # Parentheses are in a purposeful order; will cause an error of
        # unbalanced parentheses when the regex is compiled if they are not
        # escaped.
        # Test instigated by bug #796149 .
        need_escaping = ".^$*+?{}\[]|)("
        self.assertTrue(_strptime._strptime_time(need_escaping, need_escaping))

    def test_feb29_on_leap_year_without_year(self):
        # Must not raise: Feb 29 is valid in the default year-less context.
        time.strptime("Feb 29", "%b %d")

    def test_mar1_comes_after_feb29_even_when_omitting_the_year(self):
        self.assertLess(
            time.strptime("Feb 29", "%b %d"),
            time.strptime("Mar 1", "%b %d"))
class Strptime12AMPMTests(unittest.TestCase):
    """Test a _strptime regression in '%I %p' at 12 noon (12 PM)."""

    def test_twelve_noon_midnight(self):
        # 12 PM must map to hour 12 and 12 AM to hour 0, through both the
        # public time.strptime and the private _strptime helper.
        self.assertEqual(time.strptime('12 PM', '%I %p')[3], 12)
        self.assertEqual(time.strptime('12 AM', '%I %p')[3], 0)
        self.assertEqual(_strptime._strptime_time('12 PM', '%I %p')[3], 12)
        self.assertEqual(_strptime._strptime_time('12 AM', '%I %p')[3], 0)
class JulianTests(unittest.TestCase):
    """Test a _strptime regression that all julian (1-366) are accepted."""

    def test_all_julian_days(self):
        # 2004 is a leap year, so every day number 1..366 must round-trip
        # through the %j directive into tm_yday (index 7).
        for day in range(1, 367):
            parsed = _strptime._strptime_time('%d 2004' % day, '%j %Y')
            self.assertEqual(parsed[7], day)
class CalculationTests(unittest.TestCase):
    """Test that strptime() fills in missing info correctly"""

    def setUp(self):
        self.time_tuple = time.gmtime()

    def test_julian_calculation(self):
        # Make sure that when Julian is missing that it is calculated
        # (%j is absent from the format, so tm_yday must be derived).
        format_string = "%Y %m %d %H %M %S %w %Z"
        result = _strptime._strptime_time(time.strftime(format_string, self.time_tuple),
                                          format_string)
        self.assertTrue(result.tm_yday == self.time_tuple.tm_yday,
                        "Calculation of tm_yday failed; %s != %s" %
                        (result.tm_yday, self.time_tuple.tm_yday))

    def test_gregorian_calculation(self):
        # Test that Gregorian date can be calculated from Julian day
        # (format carries %j but no %m/%d).
        format_string = "%Y %H %M %S %w %j %Z"
        result = _strptime._strptime_time(time.strftime(format_string, self.time_tuple),
                                          format_string)
        self.assertTrue(result.tm_year == self.time_tuple.tm_year and
                        result.tm_mon == self.time_tuple.tm_mon and
                        result.tm_mday == self.time_tuple.tm_mday,
                        "Calculation of Gregorian date failed;"
                        "%s-%s-%s != %s-%s-%s" %
                        (result.tm_year, result.tm_mon, result.tm_mday,
                         self.time_tuple.tm_year, self.time_tuple.tm_mon,
                         self.time_tuple.tm_mday))

    def test_day_of_week_calculation(self):
        # Test that the day of the week is calculated as needed
        # (format omits %w, so tm_wday must be derived).
        format_string = "%Y %m %d %H %S %j %Z"
        result = _strptime._strptime_time(time.strftime(format_string, self.time_tuple),
                                          format_string)
        self.assertTrue(result.tm_wday == self.time_tuple.tm_wday,
                        "Calculation of day of the week failed;"
                        "%s != %s" % (result.tm_wday, self.time_tuple.tm_wday))

    def test_week_of_year_and_day_of_week_calculation(self):
        # Should be able to infer date if given year, week of year (%U or %W)
        # and day of the week
        def test_helper(ymd_tuple, test_reason):
            # Try both week-numbering conventions (%W Monday-first, %U
            # Sunday-first) for the given (year, month, day).
            for directive in ('W', 'U'):
                format_string = "%%Y %%%s %%w" % directive
                dt_date = datetime_date(*ymd_tuple)
                strp_input = dt_date.strftime(format_string)
                strp_output = _strptime._strptime_time(strp_input, format_string)
                self.assertTrue(strp_output[:3] == ymd_tuple,
                                "%s(%s) test failed w/ '%s': %s != %s (%s != %s)" %
                                (test_reason, directive, strp_input,
                                 strp_output[:3], ymd_tuple,
                                 strp_output[7], dt_date.timetuple()[7]))
        test_helper((1901, 1, 3), "week 0")
        test_helper((1901, 1, 8), "common case")
        test_helper((1901, 1, 13), "day on Sunday")
        test_helper((1901, 1, 14), "day on Monday")
        test_helper((1905, 1, 1), "Jan 1 on Sunday")
        test_helper((1906, 1, 1), "Jan 1 on Monday")
        test_helper((1906, 1, 7), "first Sunday in a year starting on Monday")
        test_helper((1905, 12, 31), "Dec 31 on Sunday")
        test_helper((1906, 12, 31), "Dec 31 on Monday")
        test_helper((2008, 12, 29), "Monday in the last week of the year")
        test_helper((2008, 12, 22), "Monday in the second-to-last week of the "
                                    "year")
        test_helper((1978, 10, 23), "randomly chosen date")
        test_helper((2004, 12, 18), "randomly chosen date")
        test_helper((1978, 10, 23), "year starting and ending on Monday while "
                                    "date not on Sunday or Monday")
        test_helper((1917, 12, 17), "year starting and ending on Monday with "
                                    "a Monday not at the beginning or end "
                                    "of the year")
        test_helper((1917, 12, 31), "Dec 31 on Monday with year starting and "
                                    "ending on Monday")
        test_helper((2007, 1, 7), "First Sunday of 2007")
        test_helper((2007, 1, 14), "Second Sunday of 2007")
        test_helper((2006, 12, 31), "Last Sunday of 2006")
        test_helper((2006, 12, 24), "Second to last Sunday of 2006")
class CacheTests(unittest.TestCase):
    """Test that caching works properly."""

    def test_time_re_recreation(self):
        # Make sure cache is recreated when current locale does not match what
        # cached object was created with.
        _strptime._strptime_time("10", "%d")
        _strptime._strptime_time("2005", "%Y")
        # Poison the cached locale so the next call sees a mismatch.
        _strptime._TimeRE_cache.locale_time.lang = "Ni"
        original_time_re = _strptime._TimeRE_cache
        _strptime._strptime_time("10", "%d")
        self.assertIsNot(original_time_re, _strptime._TimeRE_cache)
        # Rebuilding the TimeRE must also flush the regex cache.
        self.assertEqual(len(_strptime._regex_cache), 1)

    def test_regex_cleanup(self):
        # Make sure cached regexes are discarded when cache becomes "full".
        try:
            del _strptime._regex_cache['%d']
        except KeyError:
            pass
        # Overfill the cache with dummy entries, then trigger a real parse.
        bogus_key = 0
        while len(_strptime._regex_cache) <= _strptime._CACHE_MAX_SIZE:
            _strptime._regex_cache[bogus_key] = None
            bogus_key += 1
        _strptime._strptime_time("10", "%d")
        self.assertEqual(len(_strptime._regex_cache), 1)

    def test_new_localetime(self):
        # A new LocaleTime instance should be created when a new TimeRE object
        # is created.
        locale_time_id = _strptime._TimeRE_cache.locale_time
        _strptime._TimeRE_cache.locale_time.lang = "Ni"
        _strptime._strptime_time("10", "%d")
        self.assertIsNot(locale_time_id, _strptime._TimeRE_cache.locale_time)

    def test_TimeRE_recreation(self):
        # The TimeRE instance should be recreated upon changing the locale.
        locale_info = locale.getlocale(locale.LC_TIME)
        try:
            locale.setlocale(locale.LC_TIME, ('en_US', 'UTF8'))
        except locale.Error:
            self.skipTest('test needs en_US.UTF8 locale')
        try:
            _strptime._strptime_time('10', '%d')
            # Get id of current cache object.
            first_time_re = _strptime._TimeRE_cache
            try:
                # Change the locale and force a recreation of the cache.
                locale.setlocale(locale.LC_TIME, ('de_DE', 'UTF8'))
                _strptime._strptime_time('10', '%d')
                # Get the new cache object's id.
                second_time_re = _strptime._TimeRE_cache
                # They should not be equal.
                self.assertIsNot(first_time_re, second_time_re)
            # Possible test locale is not supported while initial locale is.
            # If this is the case just suppress the exception and fall-through
            # to the resetting to the original locale.
            except locale.Error:
                self.skipTest('test needs de_DE.UTF8 locale')
        # Make sure we don't trample on the locale setting once we leave the
        # test.
        finally:
            locale.setlocale(locale.LC_TIME, locale_info)
def test_main():
    """Run every test class in this module via test.support."""
    test_classes = (
        getlang_Tests,
        LocaleTime_Tests,
        TimeRETests,
        StrptimeTests,
        Strptime12AMPMTests,
        JulianTests,
        CalculationTests,
        CacheTests,
    )
    support.run_unittest(*test_classes)


if __name__ == '__main__':
    test_main()
| gpl-3.0 |
neuroidss/nupic.rogue | avogadro/model_params.py | 2 | 4002 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
# Base CLA (HTM) model configuration; getModelParams() deep-copies this and
# injects the per-field encoder before handing it to the model factory.
MODEL_PARAMS = {
    # No pre-aggregation of input records (all intervals zero).
    "aggregationInfo": {
        "seconds": 0,
        "fields": [],
        "months": 0,
        "days": 0,
        "years": 0,
        "hours": 0,
        "microseconds": 0,
        "weeks": 0,
        "minutes": 0,
        "milliseconds": 0
    },
    "model": "CLA",
    "version": 1,
    "predictAheadTime": None,
    "modelParams": {
        # Sensor region: only timestamp encoders are preset here; the
        # predicted-field encoder is added at runtime by getModelParams().
        "sensorParams": {
            "verbosity": 0,
            "encoders": {
                "timestamp_dayOfWeek": None,
                "timestamp_timeOfDay": {
                    "type": "DateEncoder",
                    "timeOfDay": [21, 5.4864773611134598],
                    "fieldname": "timestamp",
                    "name": "timestamp"
                },
                "timestamp_weekend": None
            },
            "sensorAutoReset": None
        },
        # Spatial pooler parameters (C++ implementation).
        "spParams": {
            "spatialImp": "cpp",
            "columnCount": 2048,
            "synPermInactiveDec": 0.00065,
            "inputWidth": 0,
            "spVerbosity": 0,
            "synPermActiveInc": 0.001,
            "synPermConnected": 0.1,
            "numActiveColumnsPerInhArea": 40,
            "seed": 1956,
            "potentialPct": 0.8,
            "globalInhibition": 1,
            "maxBoost": 1.0
        },
        "trainSPNetOnlyIfRequested": False,
        # CLA classifier parameters (classifier region itself is disabled
        # below via "clEnable": False).
        "clParams": {
            "alpha": 0.0068717199878650798,
            "regionName": "CLAClassifierRegion",
            "steps": "1",
            "clVerbosity": 0
        },
        # Temporal pooler / sequence-memory parameters (C++ implementation).
        "tpParams": {
            "columnCount": 2048,
            "activationThreshold": 13,
            "pamLength": 1,
            "cellsPerColumn": 32,
            "permanenceInc": 0.1,
            "minThreshold": 11,
            "verbosity": 0,
            "maxSynapsesPerSegment": 32,
            "outputType": "normal",
            "globalDecay": 0.0,
            "initialPerm": 0.21,
            "permanenceDec": 0.1,
            "seed": 1960,
            "maxAge": 0,
            "newSynapseCount": 20,
            "maxSegmentsPerCell": 128,
            "temporalImp": "cpp",
            "inputWidth": 2048
        },
        # Anomaly-detection settings; autoDetectWaitRecords delays automatic
        # threshold detection until this many records have been seen.
        "anomalyParams": {
            "anomalyCacheRecords": None,
            "autoDetectThreshold": None,
            "autoDetectWaitRecords": 5030
        },
        "spEnable": True,
        "inferenceType": "TemporalAnomaly",
        "tpEnable": True,
        "clEnable": False
    }
}
def getModelParams(encoderParams, predictedField):
    """
    Build a complete model params dict for one predicted field.

    :param encoderParams: A dict containing the encoder parameters for the
      specified predicted field. For example:
      {
        u"CPUPercent": {
          u"name": u"CPUPercent",
          "fieldname": u"CPUPercent",
          "resolution": 0.3521126761,
          "seed": 42,
          "type": "RandomDistributedScalarEncoder"
        }
      }
      NOTE: The fieldname, name and parent value must all be the same (e.g.,
      CPUPercent)
    :param predictedField: A `string` representing the name of the
      predictedField. This should match exactly the `fieldname` in the encoder
      params
    :returns: A `dict` with all of the relevant model parameters
    :rtype: dict
    """
    # Deep-copy so the shared MODEL_PARAMS template is never mutated.
    params = copy.deepcopy(MODEL_PARAMS)
    encoders = params["modelParams"]["sensorParams"]["encoders"]
    encoders[predictedField] = encoderParams[predictedField]
    return params
| agpl-3.0 |
StefanRijnhart/purchase-workflow | __unported__/purchase_landed_costs/__init__.py | 27 | 1178 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-2013 Camptocamp (<http://www.camptocamp.com>)
# Authors: Ferdinand Gasauer, Joel Grand-Guillaume
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import product
from . import purchase
from . import stock
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ENjOyAbLE1991/scrapy | scrapy/http/headers.py | 132 | 2329 | import six
from w3lib.http import headers_dict_to_raw
from scrapy.utils.datatypes import CaselessDict
class Headers(CaselessDict):
    """Case insensitive http headers dictionary.

    Each key maps internally to a *list* of bytes values; `__getitem__`/`get`
    return only the most recent value, while `getlist` exposes them all.
    """

    def __init__(self, seq=None, encoding='utf-8'):
        # Encoding used when coercing text/int values to bytes.
        self.encoding = encoding
        super(Headers, self).__init__(seq)

    def normkey(self, key):
        """Normalize key to bytes"""
        # Title-case first so lookups are case-insensitive in the usual
        # HTTP-header spelling (e.g. b'Content-Type').
        return self._tobytes(key.title())

    def normvalue(self, value):
        """Normalize values to bytes"""
        # Values are always stored as a list of bytes; scalars are wrapped.
        if value is None:
            value = []
        elif isinstance(value, (six.text_type, bytes)):
            value = [value]
        elif not hasattr(value, '__iter__'):
            value = [value]
        return [self._tobytes(x) for x in value]

    def _tobytes(self, x):
        # Coerce bytes/str/int to bytes using self.encoding; reject the rest.
        if isinstance(x, bytes):
            return x
        elif isinstance(x, six.text_type):
            return x.encode(self.encoding)
        elif isinstance(x, int):
            return six.text_type(x).encode(self.encoding)
        else:
            raise TypeError('Unsupported value type: {}'.format(type(x)))

    def __getitem__(self, key):
        # Return only the last stored value, or None for an empty list.
        try:
            return super(Headers, self).__getitem__(key)[-1]
        except IndexError:
            return None

    def get(self, key, def_val=None):
        # Like __getitem__ but with a default; still returns the last value.
        try:
            return super(Headers, self).get(key, def_val)[-1]
        except IndexError:
            return None

    def getlist(self, key, def_val=None):
        """Return the full list of values stored for *key*."""
        try:
            return super(Headers, self).__getitem__(key)
        except KeyError:
            if def_val is not None:
                return self.normvalue(def_val)
            return []

    def setlist(self, key, list_):
        self[key] = list_

    def setlistdefault(self, key, default_list=()):
        return self.setdefault(key, default_list)

    def appendlist(self, key, value):
        # Append value(s) to the existing list instead of replacing it.
        lst = self.getlist(key)
        lst.extend(self.normvalue(value))
        self[key] = lst

    def items(self):
        return list(self.iteritems())

    def iteritems(self):
        # Yield (key, list-of-values) pairs.
        return ((k, self.getlist(k)) for k in self.keys())

    def values(self):
        # Only the last value per header, matching __getitem__ semantics.
        return [self[k] for k in self.keys()]

    def to_string(self):
        # Serialize to raw wire format via w3lib.
        return headers_dict_to_raw(self)

    def __copy__(self):
        return self.__class__(self)
    copy = __copy__
| bsd-3-clause |
dichenko/kpk2016 | Diff/111879.py | 1 | 1032 | #Дана возрастающая последовательность целых чисел 1, 2, 4, 5, 7, 9, 10, 12, 14, 16, 17, ...
# Она сформирована следующим образом: берется одно нечетное число, затем два четных,
# затем три нечетных и так далее. Выведите N-й элемент этой последовательности.
def next_chet(x):
    """Return the smallest even number strictly greater than x."""
    return x + 2 if x % 2 == 0 else x + 1
def next_nechet(x):
    """Return the smallest odd number strictly greater than x."""
    if x % 2 == 0:
        return x + 1
    return x + 2
def is_chet(x):
    """Return True when x is even, False otherwise."""
    return not x % 2
# Read the 1-based index N of the wanted element from stdin.
n = int(input())
# D accumulates the sequence; it starts with the single odd number 1.
D = [1]
data = D[0]       # last value appended so far
counter = 1       # length of the current run (1 odd, 2 even, 3 odd, ...)
while len(D) < n:
    counter += 1
    if is_chet(counter):
        # Even-length runs consist of consecutive even numbers.
        for i in range(counter):
            k = next_chet(data)
            D.append(k)
            data = k
            if len(D) >= n: break
    else:
        # Odd-length runs consist of consecutive odd numbers.
        for i in range(counter):
            k = next_nechet(data)
            D.append(k)
            data = k
            if len(D) >= n: break
# The loop stops exactly when len(D) == n, so D[-1] is the N-th element.
print(D[-1])
| gpl-3.0 |
edcast-inc/edx-platform-edcast | lms/djangoapps/teams/tests/test_views.py | 18 | 31646 | # -*- coding: utf-8 -*-
"""Tests for the teams API at the HTTP request level."""
# pylint: disable=maybe-no-member
import json
import ddt
from django.core.urlresolvers import reverse
from nose.plugins.attrib import attr
from rest_framework.test import APITestCase, APIClient
from courseware.tests.factories import StaffFactory
from student.tests.factories import UserFactory, AdminFactory, CourseEnrollmentFactory
from student.models import CourseEnrollment
from xmodule.modulestore.tests.factories import CourseFactory
from .factories import CourseTeamFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
@attr('shard_1')
class TestDashboard(ModuleStoreTestCase):
    """Tests for the Teams dashboard."""
    test_password = "test"
    def setUp(self):
        """
        Set up tests
        """
        super(TestDashboard, self).setUp()
        # Teams are enabled for a course by giving it a non-empty teams_configuration.
        self.course = CourseFactory.create(
            teams_configuration={"max_team_size": 10, "topics": [{"name": "foo", "id": 0, "description": "test topic"}]}
        )
        # will be assigned to self.client by default
        self.user = UserFactory.create(password=self.test_password)
        self.teams_url = reverse('teams_dashboard', args=[self.course.id])
    def test_anonymous(self):
        """ Verifies that an anonymous client cannot access the team dashboard. """
        anonymous_client = APIClient()
        response = anonymous_client.get(self.teams_url)
        self.assertEqual(404, response.status_code)
    def test_not_enrolled_not_staff(self):
        """ Verifies that a student who is not enrolled cannot access the team dashboard. """
        response = self.client.get(self.teams_url)
        self.assertEqual(404, response.status_code)
    def test_not_enrolled_staff(self):
        """
        Verifies that a user with global access who is not enrolled in the course can access the team dashboard.
        """
        staff_user = UserFactory(is_staff=True, password=self.test_password)
        staff_client = APIClient()
        staff_client.login(username=staff_user.username, password=self.test_password)
        response = staff_client.get(self.teams_url)
        # "TeamsTabFactory" appearing in the page body marks a rendered dashboard.
        self.assertContains(response, "TeamsTabFactory", status_code=200)
    def test_enrolled_not_staff(self):
        """
        Verifies that a user without global access who is enrolled in the course can access the team dashboard.
        """
        CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
        self.client.login(username=self.user.username, password=self.test_password)
        response = self.client.get(self.teams_url)
        self.assertContains(response, "TeamsTabFactory", status_code=200)
    def test_enrolled_teams_not_enabled(self):
        """
        Verifies that a user without global access who is enrolled in the course cannot access the team dashboard
        if the teams feature is not enabled.
        """
        # A course created without teams_configuration has teams disabled.
        course = CourseFactory.create()
        teams_url = reverse('teams_dashboard', args=[course.id])
        CourseEnrollmentFactory.create(user=self.user, course_id=course.id)
        self.client.login(username=self.user.username, password=self.test_password)
        response = self.client.get(teams_url)
        self.assertEqual(404, response.status_code)
    def test_bad_course_id(self):
        """
        Verifies expected behavior when course_id does not reference an existing course or is invalid.
        """
        bad_org = "badorgxxx"
        bad_team_url = self.teams_url.replace(self.course.id.org, bad_org)
        response = self.client.get(bad_team_url)
        self.assertEqual(404, response.status_code)
        # An unparseable course key should also yield 404, not a server error.
        bad_team_url = bad_team_url.replace(bad_org, "invalid/course/id")
        response = self.client.get(bad_team_url)
        self.assertEqual(404, response.status_code)
class TeamAPITestCase(APITestCase, ModuleStoreTestCase):
    """Base class for Team API test cases."""
    test_password = 'password'
    def setUp(self):
        """Create two courses, users with varying access levels, and teams."""
        super(TeamAPITestCase, self).setUp()
        teams_configuration = {
            'topics':
            [
                {
                    'id': 'topic_{}'.format(i),
                    'name': name,
                    'description': 'Description for topic {}.'.format(i)
                } for i, name in enumerate([u'sólar power', 'Wind Power', 'Nuclear Power', 'Coal Power'])
            ]
        }
        self.topics_count = 4
        self.test_course_1 = CourseFactory.create(
            org='TestX',
            course='TS101',
            display_name='Test Course',
            teams_configuration=teams_configuration
        )
        self.test_course_2 = CourseFactory.create(org='MIT', course='6.002x', display_name='Circuits')
        self.users = {
            'student_unenrolled': UserFactory.create(password=self.test_password),
            'student_enrolled': UserFactory.create(password=self.test_password),
            'student_enrolled_not_on_team': UserFactory.create(password=self.test_password),
            # This student is enrolled in both test courses and is a member of a team in each course, but is not on the
            # same team as student_enrolled.
            'student_enrolled_both_courses_other_team': UserFactory.create(password=self.test_password),
            'staff': AdminFactory.create(password=self.test_password),
            'course_staff': StaffFactory.create(course_key=self.test_course_1.id, password=self.test_password)
        }
        # 'solar team' is intentionally lower case to test case insensitivity in name ordering
        self.test_team_1 = CourseTeamFactory.create(
            name=u'sólar team',
            course_id=self.test_course_1.id,
            topic_id='renewable'
        )
        self.test_team_2 = CourseTeamFactory.create(name='Wind Team', course_id=self.test_course_1.id)
        self.test_team_3 = CourseTeamFactory.create(name='Nuclear Team', course_id=self.test_course_1.id)
        # Bug fix: 'Coal Team' was previously assigned to self.test_team_4 and then
        # immediately shadowed by the 'Another Team' assignment below, discarding the
        # reference. Keep it under its own attribute so both teams stay addressable.
        self.test_team_5 = CourseTeamFactory.create(name='Coal Team', course_id=self.test_course_1.id, is_active=False)
        self.test_team_4 = CourseTeamFactory.create(name='Another Team', course_id=self.test_course_2.id)
        for user, course in [
            ('student_enrolled', self.test_course_1),
            ('student_enrolled_not_on_team', self.test_course_1),
            ('student_enrolled_both_courses_other_team', self.test_course_1),
            ('student_enrolled_both_courses_other_team', self.test_course_2)
        ]:
            CourseEnrollment.enroll(
                self.users[user], course.id, check_access=True
            )
        self.test_team_1.add_user(self.users['student_enrolled'])
        self.test_team_3.add_user(self.users['student_enrolled_both_courses_other_team'])
        self.test_team_4.add_user(self.users['student_enrolled_both_courses_other_team'])
    def login(self, user):
        """Given a user string, logs the given user in.
        Used for testing with ddt, which does not have access to self in
        decorators. If user is 'student_inactive', then an inactive user will
        be both created and logged in.
        """
        if user == 'student_inactive':
            student_inactive = UserFactory.create(password=self.test_password)
            # Log in first: deactivating before login would make authentication fail.
            self.client.login(username=student_inactive.username, password=self.test_password)
            student_inactive.is_active = False
            student_inactive.save()
        else:
            self.client.login(username=self.users[user].username, password=self.test_password)
    def make_call(self, url, expected_status=200, method='get', data=None, content_type=None, **kwargs):
        """Makes a call to the Team API at the given url with method and data.
        If a user is specified in kwargs, that user is first logged in.
        Returns parsed JSON on a 200 response, otherwise the raw response.
        """
        user = kwargs.pop('user', 'student_enrolled')
        if user:
            self.login(user)
        func = getattr(self.client, method)
        if content_type:
            response = func(url, data=data, content_type=content_type)
        else:
            response = func(url, data=data)
        self.assertEqual(expected_status, response.status_code)
        if expected_status == 200:
            return json.loads(response.content)
        else:
            return response
    def get_teams_list(self, expected_status=200, data=None, no_course_id=False, **kwargs):
        """Gets the list of teams as the given user with data as query params. Verifies expected_status."""
        data = data if data else {}
        if 'course_id' not in data and not no_course_id:
            data.update({'course_id': self.test_course_1.id})
        return self.make_call(reverse('teams_list'), expected_status, 'get', data, **kwargs)
    def build_team_data(self, name="Test team", course=None, description="Filler description", **kwargs):
        """Creates the payload for creating a team. kwargs can be used to specify additional fields."""
        data = kwargs
        course = course if course else self.test_course_1
        data.update({
            'name': name,
            'course_id': str(course.id),
            'description': description,
        })
        return data
    def post_create_team(self, expected_status=200, data=None, **kwargs):
        """Posts data to the team creation endpoint. Verifies expected_status."""
        return self.make_call(reverse('teams_list'), expected_status, 'post', data, **kwargs)
    def get_team_detail(self, team_id, expected_status=200, data=None, **kwargs):
        """Gets detailed team information for team_id. Verifies expected_status."""
        return self.make_call(reverse('teams_detail', args=[team_id]), expected_status, 'get', data, **kwargs)
    def patch_team_detail(self, team_id, expected_status, data=None, **kwargs):
        """Patches the team with team_id using data. Verifies expected_status."""
        return self.make_call(
            reverse('teams_detail', args=[team_id]),
            expected_status,
            'patch',
            json.dumps(data) if data else None,
            'application/merge-patch+json',
            **kwargs
        )
    def get_topics_list(self, expected_status=200, data=None, **kwargs):
        """Gets the list of topics, passing data as query params. Verifies expected_status."""
        return self.make_call(reverse('topics_list'), expected_status, 'get', data, **kwargs)
    def get_topic_detail(self, topic_id, course_id, expected_status=200, data=None, **kwargs):
        """Gets a single topic, passing data as query params. Verifies expected_status."""
        return self.make_call(
            reverse('topics_detail', kwargs={'topic_id': topic_id, 'course_id': str(course_id)}),
            expected_status,
            'get',
            data,
            **kwargs
        )
    def get_membership_list(self, expected_status=200, data=None, **kwargs):
        """Gets the membership list, passing data as query params. Verifies expected_status."""
        return self.make_call(reverse('team_membership_list'), expected_status, 'get', data, **kwargs)
    def post_create_membership(self, expected_status=200, data=None, **kwargs):
        """Posts data to the membership creation endpoint. Verifies expected_status."""
        return self.make_call(reverse('team_membership_list'), expected_status, 'post', data, **kwargs)
    def get_membership_detail(self, team_id, username, expected_status=200, data=None, **kwargs):
        """Gets an individual membership record, passing data as query params. Verifies expected_status."""
        return self.make_call(
            reverse('team_membership_detail', args=[team_id, username]),
            expected_status,
            'get',
            data,
            **kwargs
        )
    def delete_membership(self, team_id, username, expected_status=200, **kwargs):
        """Deletes an individual membership record. Verifies expected_status."""
        return self.make_call(
            reverse('team_membership_detail', args=[team_id, username]),
            expected_status,
            'delete',
            **kwargs
        )
    def verify_expanded_user(self, user):
        """Verifies that fields exist on the returned user json indicating that it is expanded."""
        for field in ['id', 'url', 'email', 'name', 'username', 'preferences']:
            self.assertIn(field, user)
    def verify_expanded_team(self, team):
        """Verifies that fields exist on the returned team json indicating that it is expanded."""
        for field in ['id', 'name', 'is_active', 'course_id', 'topic_id', 'date_created', 'description']:
            self.assertIn(field, team)
@ddt.ddt
class TestListTeamsAPI(TeamAPITestCase):
    """Test cases for the team listing API endpoint."""
    @ddt.data(
        (None, 401),
        ('student_inactive', 401),
        ('student_unenrolled', 403),
        ('student_enrolled', 200),
        ('staff', 200),
        ('course_staff', 200),
    )
    @ddt.unpack
    def test_access(self, user, status):
        """Listing teams requires an active, enrolled (or staff) user."""
        teams = self.get_teams_list(user=user, expected_status=status)
        if status == 200:
            self.assertEqual(3, teams['count'])
    def test_missing_course_id(self):
        """Omitting the course_id query param is a bad request (400)."""
        self.get_teams_list(400, no_course_id=True)
    def verify_names(self, data, status, names=None, **kwargs):
        """Gets a team listing with data as query params, verifies status, and then verifies team names if specified."""
        teams = self.get_teams_list(data=data, expected_status=status, **kwargs)
        if names:
            self.assertEqual(names, [team['name'] for team in teams['results']])
    def test_filter_invalid_course_id(self):
        """A malformed course id filter yields 400."""
        self.verify_names({'course_id': 'no_such_course'}, 400)
    def test_filter_course_id(self):
        """Filtering by course id returns only that course's teams."""
        self.verify_names({'course_id': self.test_course_2.id}, 200, ['Another Team'], user='staff')
    def test_filter_topic_id(self):
        """Filtering by topic id returns only teams within that topic."""
        self.verify_names({'course_id': self.test_course_1.id, 'topic_id': 'renewable'}, 200, [u'sólar team'])
    def test_filter_include_inactive(self):
        """include_inactive=True also returns the inactive 'Coal Team'."""
        self.verify_names({'include_inactive': True}, 200, ['Coal Team', 'Nuclear Team', u'sólar team', 'Wind Team'])
    # Text search is not yet implemented, so this should return HTTP
    # 400 for now
    def test_filter_text_search(self):
        """text_search is unsupported and rejected with 400."""
        self.verify_names({'text_search': 'foobar'}, 400)
    @ddt.data(
        (None, 200, ['Nuclear Team', u'sólar team', 'Wind Team']),
        ('name', 200, ['Nuclear Team', u'sólar team', 'Wind Team']),
        ('open_slots', 200, ['Wind Team', u'sólar team', 'Nuclear Team']),
        ('last_activity', 400, []),
    )
    @ddt.unpack
    def test_order_by(self, field, status, names):
        """Ordering by name/open_slots works; unsupported fields give 400."""
        data = {'order_by': field} if field else {}
        self.verify_names(data, status, names)
    @ddt.data({'course_id': 'no/such/course'}, {'topic_id': 'no_such_topic'})
    def test_no_results(self, data):
        """Filters referencing nonexistent courses/topics yield 404."""
        self.get_teams_list(404, data)
    def test_page_size(self):
        """page_size=2 splits the three visible teams across 2 pages."""
        result = self.get_teams_list(200, {'page_size': 2})
        self.assertEquals(2, result['num_pages'])
    def test_page(self):
        """The last page has a 'previous' link but no 'next' link."""
        result = self.get_teams_list(200, {'page_size': 1, 'page': 3})
        self.assertEquals(3, result['num_pages'])
        self.assertIsNone(result['next'])
        self.assertIsNotNone(result['previous'])
    def test_expand_user(self):
        """expand=user inlines full user records into membership entries."""
        result = self.get_teams_list(200, {'expand': 'user', 'topic_id': 'renewable'})
        self.verify_expanded_user(result['results'][0]['membership'][0]['user'])
@ddt.ddt
class TestCreateTeamAPI(TeamAPITestCase):
    """Test cases for the team creation endpoint."""
    @ddt.data(
        (None, 401),
        ('student_inactive', 401),
        ('student_unenrolled', 403),
        ('student_enrolled', 200),
        ('staff', 200),
        ('course_staff', 200)
    )
    @ddt.unpack
    def test_access(self, user, status):
        """Creating a team requires an active, enrolled (or staff) user."""
        team = self.post_create_team(status, self.build_team_data(name="New Team"), user=user)
        if status == 200:
            # Team id is slugified from the name.
            self.assertEqual(team['id'], 'new-team')
            teams = self.get_teams_list(user=user)
            self.assertIn("New Team", [team['name'] for team in teams['results']])
    def test_naming(self):
        """Duplicate team names get numeric suffixes appended to their slug ids."""
        new_teams = [
            self.post_create_team(data=self.build_team_data(name=name))
            for name in ["The Best Team", "The Best Team", "The Best Team", "The Best Team 2"]
        ]
        self.assertEquals(
            [team['id'] for team in new_teams],
            ['the-best-team', 'the-best-team-2', 'the-best-team-3', 'the-best-team-2-2']
        )
    @ddt.data((400, {
        'name': 'Bad Course Id',
        'course_id': 'no_such_course',
        'description': "Filler Description"
    }), (404, {
        'name': "Non-existent course id",
        'course_id': 'no/such/course',
        'description': "Filler Description"
    }))
    @ddt.unpack
    def test_bad_course_data(self, status, data):
        """Malformed course ids give 400; well-formed but unknown ones give 404."""
        self.post_create_team(status, data)
    def test_missing_name(self):
        """A creation payload without a name is rejected with 400."""
        self.post_create_team(400, {
            'course_id': str(self.test_course_1.id),
            'description': "foobar"
        })
    @ddt.data({'description': ''}, {'name': 'x' * 1000}, {'name': ''})
    def test_bad_fields(self, kwargs):
        """Empty or over-long fields are rejected with 400."""
        self.post_create_team(400, self.build_team_data(**kwargs))
    def test_full(self):
        """A fully-specified payload round-trips all fields in the response."""
        team = self.post_create_team(data=self.build_team_data(
            name="Fully specified team",
            course=self.test_course_1,
            description="Another fantastic team",
            topic_id='great-topic',
            country='CA',
            language='fr'
        ))
        # Remove date_created because it changes between test runs
        del team['date_created']
        self.assertEquals(team, {
            'name': 'Fully specified team',
            'language': 'fr',
            'country': 'CA',
            'is_active': True,
            'membership': [],
            'topic_id': 'great-topic',
            'course_id': str(self.test_course_1.id),
            'id': 'fully-specified-team',
            'description': 'Another fantastic team'
        })
@ddt.ddt
class TestDetailTeamAPI(TeamAPITestCase):
    """Test cases for the team detail endpoint."""
    @ddt.data(
        (None, 401),
        ('student_inactive', 401),
        ('student_unenrolled', 403),
        ('student_enrolled', 200),
        ('staff', 200),
        ('course_staff', 200),
    )
    @ddt.unpack
    def test_access(self, user, status):
        """Team detail requires an active, enrolled (or staff) user."""
        team = self.get_team_detail(self.test_team_1.team_id, status, user=user)
        if status == 200:
            self.assertEquals(team['description'], self.test_team_1.description)
    def test_does_not_exist(self):
        """Requesting an unknown team id yields 404."""
        self.get_team_detail('no_such_team', 404)
    def test_expand_user(self):
        """expand=user inlines full user records into the membership list."""
        result = self.get_team_detail(self.test_team_1.team_id, 200, {'expand': 'user'})
        self.verify_expanded_user(result['membership'][0]['user'])
@ddt.ddt
class TestUpdateTeamAPI(TeamAPITestCase):
    """Test cases for the team update endpoint."""
    @ddt.data(
        (None, 401),
        ('student_inactive', 401),
        ('student_unenrolled', 403),
        ('student_enrolled', 403),
        ('staff', 200),
        ('course_staff', 200),
    )
    @ddt.unpack
    def test_access(self, user, status):
        """Only staff may patch a team; ordinary students get 403."""
        team = self.patch_team_detail(self.test_team_1.team_id, status, {'name': 'foo'}, user=user)
        if status == 200:
            self.assertEquals(team['name'], 'foo')
    @ddt.data(
        (None, 401),
        ('student_inactive', 401),
        ('student_unenrolled', 404),
        ('student_enrolled', 404),
        ('staff', 404),
        ('course_staff', 404),
    )
    @ddt.unpack
    def test_access_bad_id(self, user, status):
        """Patching a nonexistent team yields 404 for any authenticated user."""
        self.patch_team_detail("no_such_team", status, {'name': 'foo'}, user=user)
    @ddt.data(
        ('id', 'foobar'),
        ('description', ''),
        ('country', 'no_such_country'),
        ('language', 'no_such_language')
    )
    @ddt.unpack
    def test_bad_requests(self, key, value):
        """Read-only or invalid field values are rejected with 400."""
        self.patch_team_detail(self.test_team_1.team_id, 400, {key: value}, user='staff')
    @ddt.data(('country', 'US'), ('language', 'en'), ('foo', 'bar'))
    @ddt.unpack
    def test_good_requests(self, key, value):
        """Valid (or unknown-but-ignored) fields patch successfully."""
        self.patch_team_detail(self.test_team_1.team_id, 200, {key: value}, user='staff')
    def test_does_not_exist(self):
        """Patching an unknown team id yields 404."""
        self.patch_team_detail('no_such_team', 404, user='staff')
@ddt.ddt
class TestListTopicsAPI(TeamAPITestCase):
    """Test cases for the topic listing endpoint."""
    @ddt.data(
        (None, 401),
        ('student_inactive', 401),
        ('student_unenrolled', 403),
        ('student_enrolled', 200),
        ('staff', 200),
        ('course_staff', 200),
    )
    @ddt.unpack
    def test_access(self, user, status):
        """Listing topics requires an active, enrolled (or staff) user."""
        topics = self.get_topics_list(status, {'course_id': self.test_course_1.id}, user=user)
        if status == 200:
            self.assertEqual(topics['count'], self.topics_count)
    @ddt.data('A+BOGUS+COURSE', 'A/BOGUS/COURSE')
    def test_invalid_course_key(self, course_id):
        """Unknown course keys (either key format) yield 404."""
        self.get_topics_list(404, {'course_id': course_id})
    def test_without_course_id(self):
        """Omitting course_id is a bad request (400)."""
        self.get_topics_list(400)
    @ddt.data(
        (None, 200, ['Coal Power', 'Nuclear Power', u'sólar power', 'Wind Power'], 'name'),
        ('name', 200, ['Coal Power', 'Nuclear Power', u'sólar power', 'Wind Power'], 'name'),
        ('no_such_field', 400, [], None),
    )
    @ddt.unpack
    def test_order_by(self, field, status, names, expected_ordering):
        """Topics order by name (default and explicit); bad fields give 400."""
        data = {'course_id': self.test_course_1.id}
        if field:
            data['order_by'] = field
        topics = self.get_topics_list(status, data)
        if status == 200:
            self.assertEqual(names, [topic['name'] for topic in topics['results']])
            self.assertEqual(topics['sort_order'], expected_ordering)
    def test_pagination(self):
        """The first page of a paginated listing has a next link but no previous."""
        response = self.get_topics_list(data={
            'course_id': self.test_course_1.id,
            'page_size': 2,
        })
        self.assertEqual(2, len(response['results']))
        self.assertIn('next', response)
        self.assertIn('previous', response)
        self.assertIsNone(response['previous'])
        self.assertIsNotNone(response['next'])
    def test_default_ordering(self):
        """When no order_by is given, topics sort by name."""
        response = self.get_topics_list(data={'course_id': self.test_course_1.id})
        self.assertEqual(response['sort_order'], 'name')
@ddt.ddt
class TestDetailTopicAPI(TeamAPITestCase):
    """Test cases for the topic detail endpoint."""
    @ddt.data(
        (None, 401),
        ('student_inactive', 401),
        ('student_unenrolled', 403),
        ('student_enrolled', 200),
        ('staff', 200),
        ('course_staff', 200),
    )
    @ddt.unpack
    def test_access(self, user, status):
        """Topic detail requires an active, enrolled (or staff) user."""
        topic = self.get_topic_detail('topic_0', self.test_course_1.id, status, user=user)
        if status == 200:
            for field in ('id', 'name', 'description'):
                self.assertIn(field, topic)
    @ddt.data('A+BOGUS+COURSE', 'A/BOGUS/COURSE')
    def test_invalid_course_id(self, course_id):
        """Unknown course keys (either key format) yield 404."""
        self.get_topic_detail('topic_0', course_id, 404)
    def test_invalid_topic_id(self):
        """An unknown topic id within a valid course yields 404."""
        self.get_topic_detail('no_such_topic', self.test_course_1.id, 404)
@ddt.ddt
class TestListMembershipAPI(TeamAPITestCase):
    """Test cases for the membership list endpoint."""
    @ddt.data(
        (None, 401),
        ('student_inactive', 401),
        ('student_unenrolled', 404),
        ('student_enrolled', 200),
        ('student_enrolled_both_courses_other_team', 200),
        ('staff', 200),
        ('course_staff', 200),
    )
    @ddt.unpack
    def test_access(self, user, status):
        """Filtering membership by team_id requires course access (else 404)."""
        membership = self.get_membership_list(status, {'team_id': self.test_team_1.team_id}, user=user)
        if status == 200:
            self.assertEqual(membership['count'], 1)
            self.assertEqual(membership['results'][0]['user']['id'], self.users['student_enrolled'].username)
    @ddt.data(
        (None, 401, False),
        ('student_inactive', 401, False),
        ('student_unenrolled', 200, False),
        ('student_enrolled', 200, True),
        ('student_enrolled_both_courses_other_team', 200, True),
        ('staff', 200, True),
        ('course_staff', 200, True),
    )
    @ddt.unpack
    def test_access_by_username(self, user, status, has_content):
        """Filtering by username succeeds, but results are scoped to accessible courses."""
        membership = self.get_membership_list(status, {'username': self.users['student_enrolled'].username}, user=user)
        if status == 200:
            if has_content:
                self.assertEqual(membership['count'], 1)
                self.assertEqual(membership['results'][0]['team']['id'], self.test_team_1.team_id)
            else:
                self.assertEqual(membership['count'], 0)
    def test_no_username_or_team_id(self):
        """At least one of username/team_id must be given (else 400)."""
        self.get_membership_list(400, {})
    def test_bad_team_id(self):
        """An unknown team_id yields 404."""
        self.get_membership_list(404, {'team_id': 'no_such_team'})
    def test_expand_user(self):
        """expand=user inlines full user records into membership results."""
        result = self.get_membership_list(200, {'team_id': self.test_team_1.team_id, 'expand': 'user'})
        self.verify_expanded_user(result['results'][0]['user'])
    def test_expand_team(self):
        """expand=team inlines full team records into membership results."""
        result = self.get_membership_list(200, {'team_id': self.test_team_1.team_id, 'expand': 'team'})
        self.verify_expanded_team(result['results'][0]['team'])
@ddt.ddt
class TestCreateMembershipAPI(TeamAPITestCase):
    """Test cases for the membership creation endpoint."""
    def build_membership_data_raw(self, username, team):
        """Assembles a membership creation payload based on the raw values provided."""
        return {'username': username, 'team_id': team}
    def build_membership_data(self, username, team):
        """Assembles a membership creation payload based on the username and team model provided."""
        return self.build_membership_data_raw(self.users[username].username, team.team_id)
    @ddt.data(
        (None, 401),
        ('student_inactive', 401),
        ('student_unenrolled', 404),
        ('student_enrolled_not_on_team', 200),
        ('student_enrolled', 404),
        ('student_enrolled_both_courses_other_team', 404),
        ('staff', 200),
        ('course_staff', 200),
    )
    @ddt.unpack
    def test_access(self, user, status):
        """Only the joining student themselves or staff may create the membership."""
        membership = self.post_create_membership(
            status,
            self.build_membership_data('student_enrolled_not_on_team', self.test_team_1),
            user=user
        )
        if status == 200:
            self.assertEqual(membership['user']['id'], self.users['student_enrolled_not_on_team'].username)
            self.assertEqual(membership['team']['id'], self.test_team_1.team_id)
            memberships = self.get_membership_list(200, {'team_id': self.test_team_1.team_id})
            self.assertEqual(memberships['count'], 2)
    def test_no_username(self):
        """A payload without a username fails validation with a field error."""
        response = self.post_create_membership(400, {'team_id': self.test_team_1.team_id})
        self.assertIn('username', json.loads(response.content)['field_errors'])
    def test_no_team(self):
        """A payload without a team_id fails validation with a field error."""
        response = self.post_create_membership(400, {'username': self.users['student_enrolled_not_on_team'].username})
        self.assertIn('team_id', json.loads(response.content)['field_errors'])
    def test_bad_team(self):
        """Joining a nonexistent team yields 404."""
        self.post_create_membership(
            404,
            self.build_membership_data_raw(self.users['student_enrolled'].username, 'no_such_team')
        )
    def test_bad_username(self):
        """Joining with a nonexistent username yields 404."""
        self.post_create_membership(
            404,
            self.build_membership_data_raw('no_such_user', self.test_team_1.team_id),
            user='staff'
        )
    @ddt.data('student_enrolled', 'staff', 'course_staff')
    def test_join_twice(self, user):
        """Re-joining a team the student is already on is rejected."""
        response = self.post_create_membership(
            400,
            self.build_membership_data('student_enrolled', self.test_team_1),
            user=user
        )
        self.assertIn('already a member', json.loads(response.content)['developer_message'])
    def test_join_second_team_in_course(self):
        """A student may belong to at most one team per course."""
        response = self.post_create_membership(
            400,
            self.build_membership_data('student_enrolled_both_courses_other_team', self.test_team_1),
            user='student_enrolled_both_courses_other_team'
        )
        self.assertIn('already a member', json.loads(response.content)['developer_message'])
    @ddt.data('staff', 'course_staff')
    def test_not_enrolled_in_team_course(self, user):
        """Even staff cannot add a student who is not enrolled in the team's course."""
        response = self.post_create_membership(
            400,
            self.build_membership_data('student_unenrolled', self.test_team_1),
            user=user
        )
        self.assertIn('not enrolled', json.loads(response.content)['developer_message'])
@ddt.ddt
class TestDetailMembershipAPI(TeamAPITestCase):
    """Test cases for the membership detail endpoint."""
    @ddt.data(
        (None, 401),
        ('student_inactive', 401),
        ('student_unenrolled', 404),
        ('student_enrolled_not_on_team', 200),
        ('student_enrolled', 200),
        ('staff', 200),
        ('course_staff', 200),
    )
    @ddt.unpack
    def test_access(self, user, status):
        """Membership detail requires course access (unenrolled users get 404)."""
        self.get_membership_detail(
            self.test_team_1.team_id,
            self.users['student_enrolled'].username,
            status,
            user=user
        )
    def test_bad_team(self):
        """An unknown team id yields 404."""
        self.get_membership_detail('no_such_team', self.users['student_enrolled'].username, 404)
    def test_bad_username(self):
        """An unknown username yields 404."""
        self.get_membership_detail(self.test_team_1.team_id, 'no_such_user', 404)
    def test_no_membership(self):
        """A valid user who is not on the team yields 404."""
        self.get_membership_detail(
            self.test_team_1.team_id,
            self.users['student_enrolled_not_on_team'].username,
            404
        )
    def test_expand_user(self):
        """expand=user inlines the full user record."""
        result = self.get_membership_detail(
            self.test_team_1.team_id,
            self.users['student_enrolled'].username,
            200,
            {'expand': 'user'}
        )
        self.verify_expanded_user(result['user'])
    def test_expand_team(self):
        """expand=team inlines the full team record."""
        result = self.get_membership_detail(
            self.test_team_1.team_id,
            self.users['student_enrolled'].username,
            200,
            {'expand': 'team'}
        )
        self.verify_expanded_team(result['team'])
@ddt.ddt
class TestDeleteMembershipAPI(TeamAPITestCase):
    """Test cases for the membership deletion endpoint."""
    @ddt.data(
        (None, 401),
        ('student_inactive', 401),
        ('student_unenrolled', 404),
        ('student_enrolled_not_on_team', 404),
        ('student_enrolled', 204),
        ('staff', 204),
        ('course_staff', 204),
    )
    @ddt.unpack
    def test_access(self, user, status):
        """Only the member themselves or staff may delete a membership (204 on success)."""
        self.delete_membership(
            self.test_team_1.team_id,
            self.users['student_enrolled'].username,
            status,
            user=user
        )
    def test_bad_team(self):
        """Deleting from an unknown team yields 404."""
        self.delete_membership('no_such_team', self.users['student_enrolled'].username, 404)
    def test_bad_username(self):
        """Deleting an unknown username yields 404."""
        self.delete_membership(self.test_team_1.team_id, 'no_such_user', 404)
    def test_missing_membership(self):
        """Deleting a membership that does not exist yields 404."""
        self.delete_membership(self.test_team_2.team_id, self.users['student_enrolled'].username, 404)
| agpl-3.0 |
nsinha17/incubator-metron | metron-sensors/pycapa/pycapa/pycapa_cli.py | 5 | 2898 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from producer import producer
from consumer import consumer
def make_parser():
    """Build the command-line argument parser for the pycapa tool."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-p', '--producer', dest='producer', action='store_true', default=False, help='sniff packets and send to kafka')
    arg_parser.add_argument('-c', '--consumer', dest='consumer', action='store_true', default=False, help='read packets from kafka')
    arg_parser.add_argument('-k', '--kafka', dest='kafka_brokers', help='kafka broker(s)')
    arg_parser.add_argument('-t', '--topic', dest='topic', help='kafka topic')
    arg_parser.add_argument('-n', '--number', dest='packet_count', type=int, help='number of packets to consume')
    arg_parser.add_argument('-d', '--debug', dest='debug', type=int, default=0, help='debug every X packets')
    arg_parser.add_argument('-i', '--interface', dest='interface', help='interface to listen on')
    return arg_parser
def valid_args(args):
    """Return True when the parsed arguments form a runnable configuration."""
    # Producer mode additionally needs a capture interface; consumer mode
    # is checked second so a producer invocation missing an interface can
    # still be valid if the consumer flag and its requirements are present.
    producer_ok = args.producer and args.kafka_brokers and args.topic and args.interface
    consumer_ok = args.consumer and args.kafka_brokers and args.topic
    return bool(producer_ok or consumer_ok)
def main():
    """Parse the command line and dispatch to the consumer or producer."""
    arg_parser = make_parser()
    options = arg_parser.parse_args()
    if not valid_args(options):
        arg_parser.print_help()
    elif options.consumer:
        consumer(options)
    elif options.producer:
        producer(options)
    else:
        arg_parser.print_help()
if __name__ == '__main__':
    main()
| apache-2.0 |
kapsiry/sikteeri | membership/billing/payments.py | 2 | 9093 | # encoding: utf-8
import csv
from datetime import datetime, timedelta
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
import logging
from membership.models import BillingCycle, Payment
from membership.utils import log_change
from decimal import Decimal
logger = logging.getLogger(__name__)
class PaymentFromFutureException(Exception):
    """Raised when a bank statement row is dated more than a day in the future."""
    pass
class RequiredFieldNotFoundException(Exception):
    """Raised when a required CSV column is missing from the header row."""
    pass
class DuplicateColumnException(Exception):
    """Raised when the same column name appears more than once in the CSV header."""
    pass
class BillDictReader(csv.DictReader):
    """CSV reader that normalizes bank-statement rows into payment dicts.

    Header names are mapped to canonical short names via CSV_TRANSLATION
    (overridden per bank by subclasses), the header is validated up front,
    and each row's amount/date/reference/transaction fields are parsed
    into Decimal/datetime/cleaned-string values.
    """
    # If these fields are not found on the first line, an exception is raised
    REQUIRED_COLUMNS = ['date', 'amount', 'transaction']
    # Bank-specific header -> canonical name mapping; empty in the base class.
    CSV_TRANSLATION = {}
    def __init__(self, f, delimiter=';', *args, **kw):
        csv.DictReader.__init__(self, f, delimiter=delimiter, *args, **kw)
        # Translate headers in place to canonical short names
        for i, header in enumerate(self.fieldnames):
            self.fieldnames[i] = self._get_translation(header)
        # Check that all required columns exist in the header
        for name in self.REQUIRED_COLUMNS:
            if name not in self.fieldnames:
                error = "CSV format is invalid: missing field '%s'." % name
                raise RequiredFieldNotFoundException(error)
        # Check that each field is unique
        for name in self.fieldnames:
            if self.fieldnames.count(name) != 1:
                # Bug fix: the column name was never interpolated, so the
                # exception message used to contain a literal '%s'.
                error = "The field '%s' occurs multiple times in the header" % name
                raise DuplicateColumnException(error)
    def _get_translation(self, h):
        """Map a raw header name to its canonical short name (hook for subclasses)."""
        return self.CSV_TRANSLATION.get(h, h)
    def _get_row(self, row):
        """Pre-process a parsed row before normalization (hook for subclasses)."""
        return row
    def __next__(self):
        """Return the next row with amount/date/reference/transaction normalized.

        Returns None (instead of raising StopIteration) for an empty row;
        callers are expected to skip None values.
        """
        row = self._get_row(csv.DictReader.__next__(self))
        if len(row) == 0:
            return None
        # Finnish statements use a decimal comma; convert for Decimal().
        row['amount'] = Decimal(row['amount'].replace(",", "."))
        row['date'] = datetime.strptime(row['date'], "%d.%m.%Y")
        # NOTE(review): 'reference' is not in REQUIRED_COLUMNS, so a file
        # without that column raises KeyError here — confirm all supported
        # bank formats always provide it.
        row['reference'] = row['reference'].replace(' ', '').lstrip('0')
        row['transaction'] = row['transaction'].replace(' ', '').replace('/', '')
        if 'value_date' in row:
            row['value_date'] = datetime.strptime(row['value_date'], "%d.%m.%Y")
        return row
class OpDictReader(BillDictReader):
    """Reader for Osuuspankki CSV file format
    The module converts Osuuspankki CSV format data into a more usable form."""
    # If these fields are not found on the first line, an exception is raised
    REQUIRED_COLUMNS = ['date', 'amount', 'transaction']
    # Translation table from Osuuspankki CSV format to short names.
    # Several keys map to the same value because OP has renamed columns
    # over time; both old and new spellings are accepted.
    OP_CSV_TRANSLATION = {'Kirjauspäivä' : 'date',
                          'Arvopäivä' : 'value_date',
                          'Tap.pv' : 'date', # old format
                          'Määrä EUROA' : 'amount',
                          'Määrä' : 'amount',
                          'Tapahtumalajikoodi' : 'event_type_code',
                          'Selitys' : 'event_type_description',
                          'Saaja/Maksaja' : 'fromto',
                          'Saajan tilinumero' : 'account', # old format
                          'Saajan tilinumero ja pankin BIC' : 'account',
                          'Viite' : 'reference',
                          'Viesti' : 'message',
                          'Arkistotunnus' : 'transaction', # old format
                          'Arkistointitunnus' : 'transaction'}
    def _get_translation(self, h):
        # Overrides the base hook: uses OP_CSV_TRANSLATION instead of the
        # (empty) inherited CSV_TRANSLATION.
        # Quick and dirty, OP changes this field name too often!
        if h.startswith("Määrä"):
            return "amount"
        return self.OP_CSV_TRANSLATION.get(h, h)
class ProcountorDictReader(BillDictReader):
    """Reader for Procountor bank statement CSV exports."""
    REQUIRED_COLUMNS = ['date', 'amount', 'transaction']
    # Translation table from Procountor CSV headers to short names.
    CSV_TRANSLATION = {'Kirjauspäivä' : 'date',
                       'Arvopäivä' : 'value_date',
                       'Maksupäivä' : 'date',
                       'Maksu' : 'amount',
                       'Summa' : 'amount',
                       'Kirjausselite' : 'event_type_description',
                       'Maksaja' : 'fromto',
                       'Nimi' : 'fromto',
                       'Tilinro' : 'account',
                       'Viesti' : 'message',
                       'Viitenumero' : 'reference',
                       'Arkistointitunnus' : 'transaction',
                       'Oikea viite' : 'real_reference',
                       }
    def _get_row(self, row):
        # Prefer the corrected reference column ('Oikea viite') when present.
        if 'real_reference' in row:
            row['reference'] = row['real_reference']
        return row
def process_op_csv(file_handle, user=None):
    """Process an Osuuspankki bank statement CSV file.

    :param file_handle: open file-like object containing the OP CSV data
    :param user: acting user, forwarded to process_payments for audit logging
    :return: whatever process_payments returns for the parsed rows
    """
    logger.info("Starting OP payment CSV processing...")
    reader = OpDictReader(file_handle)
    # Bug fix: previously the user argument was accepted but silently dropped.
    return process_payments(reader, user=user)
def process_procountor_csv(file_handle, user=None):
    """Process a Procountor bank statement CSV file.

    :param file_handle: open file-like object containing the Procountor CSV data
    :param user: acting user, forwarded to process_payments for audit logging
    :return: whatever process_payments returns for the parsed rows
    """
    logger.info("Starting procountor payment CSV processing...")
    reader = ProcountorDictReader(file_handle)
    # Bug fix: previously the user argument was accepted but silently dropped.
    return process_payments(reader, user=user)
def attach_payment_to_cycle(payment, user=None):
    """
    Outside of this module, this function is mainly used by
    generate_test_data.py.

    Attaches the given payment to the billing cycle carrying its
    reference number, unless that cycle is already fully paid, in which
    case the payment is flagged as a duplicate and None is returned.
    """
    if payment.ignore is True or payment.billingcycle is not None:
        raise Exception("Unexpected function call. This shouldn't happen.")

    cycle = BillingCycle.objects.get(
        reference_number=payment.reference_number)
    if cycle.is_paid is not False and cycle.amount_paid() >= cycle.sum:
        # The cycle already has enough payments: record this one as a
        # duplicate rather than attaching it.
        payment.comment = _('duplicate payment')
        payment.duplicate = True
        log_user = User.objects.get(id=1)
        log_change(payment, log_user,
                   change_message="Payment not attached due to duplicate payment")
        payment.save()
        return None

    payment.attach_to_cycle(cycle, user=user)
    return cycle
def row_to_payment(row):
    """Return the Payment corresponding to a bank-statement row.

    An existing Payment is looked up by its unique transaction id; if
    none exists yet, a new (unsaved) instance is built from the row.
    """
    transaction_id = row['transaction']
    try:
        return Payment.objects.get(transaction_id__exact=transaction_id)
    except Payment.DoesNotExist:
        pass
    # Clamp the payment day to "now" so rows dated in the future do not
    # produce payments with future timestamps.
    return Payment(payment_day=min(datetime.now(), row['date']),
                   amount=row['amount'],
                   type=row['event_type_description'],
                   payer_name=row['fromto'],
                   reference_number=row['reference'],
                   message=row['message'][:255],
                   transaction_id=transaction_id)
def process_payments(reader, user=None):
    """
    Attach bank account events from reader to payments

    Iterates over parsed statement rows, skipping empty rows and
    outgoing (negative-amount) transactions, and tries to attach each
    incoming payment to the billing cycle named by its reference number.

    :param reader: iterable of row dicts (e.g. an OpDictReader); rows
        may be None and are then skipped
    :param user: user recorded in the change log for attachments
    :return: list of (object, payment_id, message) tuples summarizing
        the outcome of each row plus a final totals message
    :raises PaymentFromFutureException: if a row is dated more than one
        day in the future
    """
    return_messages = []
    # Counts and EUR sums of payments attached / left unattached.
    num_attached = num_notattached = 0
    sum_attached = sum_notattached = 0
    for row in reader:
        if row is None:
            continue
        if row['amount'] < 0:  # Transaction is paid by us, ignored
            continue
        # Payment in future more than 1 day is a fatal error
        if row['date'] > datetime.now() + timedelta(days=1):
            raise PaymentFromFutureException("Payment date in future")
        payment = row_to_payment(row)
        # Do nothing if this payment has already been assigned or ignored
        if payment.billingcycle or payment.ignore:
            continue
        try:
            # Returns the cycle on success, or None when the cycle was
            # already fully paid (payment flagged as duplicate).
            cycle = attach_payment_to_cycle(payment, user=user)
            if cycle:
                msg = _("Attached payment %(payment)s to cycle %(cycle)s") % {
                    'payment': str(payment), 'cycle': str(cycle)}
                logger.info(msg)
                return_messages.append((None, None, msg))
                num_attached = num_attached + 1
                sum_attached = sum_attached + payment.amount
            else:
                # Payment not attached to cycle because enough payments were attached
                msg = _("Billing cycle already paid for %s. Payment not attached.") % payment
                return_messages.append((None, None, msg))
                logger.info(msg)
                num_notattached = num_notattached + 1
                sum_notattached = sum_notattached + payment.amount
        except BillingCycle.DoesNotExist:
            # Failed to find cycle for this reference number
            if not payment.id:
                payment.save()  # Only save if object not in database yet
            logger.warning("No billing cycle found for %s" % payment.reference_number)
            return_messages.append((None, payment.id, _("No billing cycle found for %s") % payment))
            num_notattached = num_notattached + 1
            sum_notattached = sum_notattached + payment.amount
    # Append one summary line covering the whole run.
    log_message = "Processed %s payments total %.2f EUR. Unidentified payments: %s (%.2f EUR)" % (
        num_attached + num_notattached, sum_attached + sum_notattached, num_notattached,
        sum_notattached)
    logger.info(log_message)
    return_messages.append((None, None, log_message))
    return return_messages
| mit |
achim1/HTCTools | htctools/batchfarming.py | 1 | 33073 | """
Management of Zeuthen Batch farmjobs
"""
import sys
#
#import shlex
#import sys
#import tarfile
#import time
#import logging
#import zmq
#import os
#import sqlite3
#import re
#
#from .. import CONFIG, Logger
#from .metascript import MetaScript
#from .utils import ListSlicer, RotatingFileNamer, GetEnvshells, ExitFunc, CheckBzipIntegrity
#from .databases import OpenSQLiteDB, CloseSQLiteDB
#import databases as file_db
#
#
#from os.path import join,split
#from subprocess import Popen,PIPE
#from glob import glob
#from copy import copy
#from time import sleep
#
##########################################
#
#class SubmissionError(Exception):
# pass
#
##########################################
#
#def JobPropertiesFactory(jobtype):
# """
# Return predefined sets of JobProperties
# """
# props = JobProperties()
# if jobtype == "long_run_high_mem":
# props.fortyeighthour_queue()
# props.request_ram(10000)
# props.as_sl6()
# else:
# raise ValueError("Please give a keyword for a certain job settings preset")
# return props
#
#
####################################################
#
#class JobProperties(object):
# """
# A wrapper for numerous job properties
# """
# def __init__(self):
# self.mailoption = ''
# self.ready_for_submission = False
# self.ressources = {'hcpu':'h_cpu=47:59:59','hvmem':'h_rss=2000M'} # parameter for -l
# self.shelltype = '/bin/zsh' # parameter for -S
# self.finished = False
# self.project = "icecube"
# self.arrayjob = False
# self.usetmpdir = False
# self.envshell = self.choose_envshell("V04-04-01")
# self.logdir = "/dev/null"
#
# def specify_tmpdir_size(self,megabytes):
# """
# Request the size of $TMP
# """
# self.ressources['tmpdir'] = 'tmpdir_size=' + str(megabytes) + 'M '
# self.usetmpdir = True
#
# def send_mail(self,mailoption):
# """
# mailoption could be either 'b','e' or 'a'
# """
# self.mailoption = mailoption
#
# def as_nuastro(self):
# """
# Set the project to z_nuastr
# """
# self.project = "z_nuastr"
#
# def short_queue(self):
# """
# Submit for short queue
# """
# self.ressources['hcpu']='h_cpu=00:29:59'
#
# def specify_logdir(self,directory):
# """
# Giva a name for the logfile
# """
# self.logdir = directory
#
# def as_sl6(self):
# """
# Select nodes with Scientific Linux 6
# """
# self.ressources['os'] = 'os=sl6'
#
# def as_sl5(self):
# """
# Select nodes with Scientific Linux 5
# """
# self.ressources['os'] = 'os=sl5'
#
# def as_arrayjob(self,minarray,maxarray):
# """
# Run job as arrayjob in range min, max
# """
# self.arrayjob = (minarray,maxarray)
#
# def request_gpu(self):
# """
# Run job on gpu nodes
# """
# self.ressources['gpu'] = "gpu"
#
# def specify_hcpu(self,hours,minutes):
# """
# Sel hard limit for cpu time
# """
# self.ressources['hcpu']="h_cpu=" + str(hours) + ":" + str(minutes) + ":59"
#
# def request_ram(self,megabites):
# """
# Set hard limit for ram
# """
# self.ressources['h_rss'] ="h_rss="+str(megabites)+"M"
#
# def twelvehour_queue(self):
# """
# Run job in 12 h queue
# """
# self.ressources['hcpu']='h_cpu=11:59:59'
#
# def twentyfourhour_queue(self):
# """
# Run job in 24 h queue
# """
# self.ressources['hcpu']='h_cpu=23:59:59'
#
# def fortyeighthour_queue(self):
# """
# Run job in 48 h queue
# """
# self.ressources['hcpu']='h_cpu=47:59:59'
#
# def prettyprint(self):
# """
# Give a nice representation which can be written in a
# submit file
# """
#
# prettystring = '#$ -S %s \n' %self.shelltype
# for item in self.ressources.keys():
# prettystring += '#$ -l %s \n' %self.ressources[item]
#
# prettystring += '#$ -cwd\n'
# prettystring += '#$ -j y \n'
# prettystring += '#$ -o %s \n' %self.logdir
# prettystring += '#$ -P %s \n' %self.project
# if self.arrayjob:
# prettystring += '#$ -t %i-%i\n' %(self.arrayjob)
#
# if self.mailoption:
# prettystring += '#$ -m %s\n\n' %self.mailoption + '\n\n'
# else:
# prettystring += '\n'
#
# return prettystring
#
# def choose_envshell(self, qualifier, sim=False):
# """
# Pick an env-shell for this job
# """
# for shell in GetEnvshells(qualifier,sim=sim):
# if qualifier in shell.split("/"):
# self.envshell = shell
# return shell
#
# def __repr__(self):
# return self.prettyprint()
#
# def __str__(self):
# return self.prettyprint()
#
#
#######################################################
#
#class FarmJob(object):
# """
# A wrapper for desy batch farmjobs array-style
# """
#
# def __init__(self,script,jobprops,jobname="batch",script_args=[],delay=0):
#
# jobdir = CONFIG.get("farmjobs","farmscriptdir")
#
# self.script = script
# self.jobname = jobname
# self.jobprops = jobprops
# self.script_args = script_args
# self.ready = False
# self.jobfile = RotatingFileNamer(jobdir,self.jobname,".sh")
# self.delay = delay
# self.jobid = 0
#
# def create_submission_file(self,copy_source=None,copy_dest=None):
# """
# Write the submission file to the
# output directory
# """
#
# f = open(self.jobfile,'w')
# f.write('#!/bin/zsh \n\n')
# if self.delay:
# delay_cond = "if (($SGE_TASK_ID > 600)); then\n sleep 600\n else\n sleep $SGE_TASK_ID\n fi\n\n"
# f.write(delay_cond)
# specs = self.jobprops.prettyprint()
# f.write(specs)
#
# # implement Hape's trap
# f.write("trap 'echo Job successfully completed' 0\n")
# f.write("trap 'echo Job killed by hitting limit;exit 2' USR1\n\n")
# if self.jobprops.usetmpdir:
#
# assert(copy_source is not None)
# #assert(copy_dest is not None)
#
# f.write('cd $TMPDIR \n')
# if copy_source.startswith("/acs"):
# f.write("dccp " + copy_source + '. \n')
# else:
# f.write('cp ' + copy_source + ' . \n')
#
#
# f.write('source /afs/ifh.de/user/s/stoessl/.zshrc' + '\n')
# f.write('envshell='+self.jobprops.envshell+'\n')
# f.write('script='+self.script.filename+'\n\n')
#
# commandostring = '$envshell $script '
# for s_arg in self.script_args:
# commandostring += s_arg
#
#
# f.write(commandostring)
# if self.jobprops.usetmpdir:
# #rm_filename = split(copy_source)[1]
# #f.write('rm -f ' + rm_filename + ' \n')
# if copy_dest is not None:
# f.write('cp * ' + copy_dest + ' \n')
#
# f.close()
# self.ready = True
#
# def submit(self):
# """
# submit the job to the batch system
# """
# if not self.ready:
# raise SubmissionError('need to prepare submission file first, not submitted!')
#
# cline = 'qsub ' + self.jobfile
# jobid = Popen(shlex.split(cline),stdout=PIPE).communicate()[0]
# self.jobid = jobid.split()[2]
# Logger.info('Job submitted with id %s!' %self.jobid)
# return self.jobid
#
###############################################################
#
#def ArrayJobFiles(files,chunksize,subjobs=[]):
# """
# Slice infiles in chunks and return an dict with
# chunknumber, chunk
# """
#
# file_dict = {}
# slices = int(float(len(files))/chunksize)
# for index,file_slice in enumerate(ListSlicer(files, slices)):
# if subjobs:
# if (index + 1) in subjobs:
# file_dict[index + 1] = file_slice # 0 is not a valid index for arrayjobs
# else:
# file_dict[index + 1] = file_slice # 0 is not a valid index for arrayjobs
#
# # filter out lists of length 0
# filtered_file_dict = dict()
# for k in file_dict.keys():
# if file_dict[k]:
# filtered_file_dict[k] = file_dict[k]
# return filtered_file_dict
#
################################################################
#
#def SimpleFarmer(func,job_kwargs,jobname="batch.sh",func_args=[],func_kwargs={},decorator=None):
# """
# Calculate func on farm as simple job
# """
# raise NotImplementedError("SimpleFarmer is not implemented yet!")
#
#
##################################################################
#
#def CopyFileToTmp(file,dcache=False):
# #subprocess.Popen("")
#
# pass
#
##################################################################
#
##FIXME: make it a class for inheritance
##class ArrayJobFarmer:
#
## def __init__(self):
## logdir = CONFIG.get('logging','farmlogs')
## farmjobdir = CONFIG.get('farmjobs','farmscriptdir')
##
## def __call__(self,merge,files,jobprops,jobname="batch",func_kwargs={},delay=False,subjobs=[]):
## pass
#
#def ArrayJobFarmer(func,merge,files,jobprops,jobname="batch",func_kwargs={},delay=False,subjobs=[],presliced=False):
# """
# Calculate func on the farm with arrayjobs with properties defined in job_kwargs
# CAVE: func_args are managed automatically
# """
#
# #if jobprops.usetmpdir:
# # assert merge == 1, "Can not currently copy more than 1 file to tmpdir"
# # copy_files = files
# # files = map(lambda x: os.path.split(x)[1],files)
#
# logdir = CONFIG.get('logging','farmlogs')
# farmjobdir = CONFIG.get('farmjobs','farmscriptdir')
#
# if presliced:
# slicedfiles = files
# n_jobs = len(slicedfiles.keys())
# else:
#    # calculate n_jobs separately, to avoid errors
# n_jobs = len(ArrayJobFiles(files,merge).keys())
# slicedfiles = ArrayJobFiles(files,merge,subjobs=subjobs)
#
# #print slicedfiles
# #print False in map(bool,[j for j in slicedfiles.values()])
# # write a python script
# # magicindex trick:
# # ensure that the sys.argv[1] of the script is used to
# # get the correct slize of files
#
# func_args = ["files[magicindex]"]
# farmpyscript = MetaScript(join(farmjobdir,jobname + ".py"),"w")
# farmpyscript.add_import(sys)
# farmpyscript.add_variable("magicindex","sys.argv[1]")
# farmpyscript.add_json_dumpable("files",slicedfiles)
# #print slicedfiles
# #raise
# if subjobs:
# farmpyscript.add_function(ExitFunc,["files","magicindex"],{},None)
# farmpyscript.add_function(func, func_args, func_kwargs)
# farmpyscript.write_exectutable()
#
# jobprops.as_arrayjob(1,n_jobs) # index starts at 1
# jobprops.specify_logdir(logdir)
# jobprops.as_sl6()
# #if jobprops.usetmpdir:
# # job = FarmJob(farmpyscript,jobprops,jobname=jobname,script_args=["$SGE_TASK_ID"],delay=delay,copy_source)
# #else:
# job = FarmJob(farmpyscript,jobprops,jobname=jobname,script_args=["$SGE_TASK_ID"],delay=delay)
# job.create_submission_file()
# job.submit()
# Logger.info("Job %s submitted with a range of %i:%i!" %(job.jobid,job.jobprops.arrayjob[0],job.jobprops.arrayjob[1]))
# return int(job.jobid.split(".")[0])
#
#######################################################################
#
#def FitJobFarmer(func,jobprops,n_jobs,jobname="fit_",func_kwargs={},decorator=None,func_selectable_args=None):
# """
# Calculate func on the farm with arrayjobs with properties defined in job_kwargs
# CAVE: func_args are managed automatically
# """
#
# logdir = CONFIG.get('logging','farmlogs') + "/fit/"
# farmjobdir = CONFIG.get('farmjobs','farmscriptdir')
#
# # write a python script
#
# #func_args = []
# farmpyscript = MetaScript(join(farmjobdir,jobname + ".py"),"w")
# farmpyscript.add_import(sys)
# if func_selectable_args is not None:
# farmpyscript.add_variable("magicindex","int(sys.argv[1]) - 1")
# farmpyscript.add_json_dumpable("seeds",func_selectable_args)
# func_args = ["seeds[magicindex]"]
# else:
# func_args = ["sys.argv[1]"]
#
# farmpyscript.add_function(func, func_args, func_kwargs, decorator)
# farmpyscript.write_exectutable()
#
# jobprops.as_arrayjob(1,n_jobs) # index starts at 1
# #jobprops.specify_logdir(logdir) # NO LOG! who needs that?
# job = FarmJob(farmpyscript,jobprops,jobname=jobname,script_args=["$SGE_TASK_ID"])
# job.create_submission_file()
# job.submit()
# Logger.info("Job %s submitted with a range of %i:%i!" %(job.jobid,job.jobprops.arrayjob[0],job.jobprops.arrayjob[1]))
# return int(job.jobid.split(".")[0])
#
#######################################################
#
#def TarFiles(infiles,filename="logs.tar"):
# """
# tar files together
# """
# archive = tarfile.open(filename,"w")
# map(archive.add,infiles)
# archive.close()
# Logger.info("Tar file written with %i files and name %s" %(len(infiles),filename))
# return filename
#
#######################################################
#
#def Resubmit(jobnum,errorlist):
# """
# Resubmit the failed jobs of an job-array
# """
# raise NotImplementedError
#
#######################################################
#
#def ResubmitFromDB(jobnum):
# """
# Get jobs from the db which failed and resubmit them
# """
#
# raise NotImplementedError
#
###################################################
#
#def StandardErrorGrepper(logs,summaryfile=None):
# """
# Search the logfiles for typical error patterns
# :param logs:
# :return:
# """
# errors = []
# unfinished = []
# mem_errors = []
# non_processed_files = []
# example_message = ""
#
# def file_len(fname):
# i = 0
# with open(fname) as f:
# for i, l in enumerate(f):
# pass
# return i + 1
#
# infilesfromlog = []
# for log in logs:
# linecnt = file_len(log)
# if not linecnt:
# Logger.warning("%s has zero lenght" %log.__repr__())
# f = open(log)
# txt = f.read()
# try:
# infilestring = txt.split("reader2: FilenameList =")[1].split("\n")[0].strip("[").strip("]").replace("[","").replace("'","").strip()
# infilesfromlog = infilestring.split(",")
# except:
# infilesfromlog = ""
# #exec("infilesfromlog = " + infilestring)
# if "rror" in txt:
# errors.append(log)
# example_message = txt
# non_processed_files += infilesfromlog
# if "MemoryError" in txt:
# mem_errors.append(log)
# non_processed_files += infilesfromlog
# if not "finish" in txt and (not linecnt ==2):
# unfinished.append(log)
# non_processed_files += infilesfromlog
#
# f.close()
#
# if errors or unfinished or mem_errors:
# Logger.info("Errors found, last log \n\n\n %s" %example_message)
#
# return errors,unfinished,mem_errors,example_message,non_processed_files
#
###################################################
#
#def FailedBZipTestSearcher(logs,summaryfile="dummy"):
#
# errors = []
# unfinished = []
# mem_errors = []
# example_message = ""
# summary = open(summaryfile,"w")
#
#
# def file_len(fname):
# i = 0
# with open(fname) as f:
# for i, l in enumerate(f):
# pass
# return i + 1
#
# for log in logs:
# linecnt = file_len(log)
# if not linecnt:
# Logger.warning("%s has zero length" %log.__repr__())
# f = open(log)
# txt = f.read()
# if "ERROR" in txt:
# errors.append(log)
# example_message = txt
# summary.write(txt)
# if "MemoryError" in txt:
# mem_errors.append(log)
# if not "finish" in txt and (not linecnt ==2):
# unfinished.append(log)
# f.close()
#
# if errors:
# Logger.info("Errors found, last log \n\n\n %s" %example_message)
#
# summary.close()
# return errors,unfinished,mem_errors,example_message,[]
#
#
#
###################################################
#
#def PostProcess(jobnum,tar=True,grepfunc=StandardErrorGrepper):
# """
# Check the logs if everthing went fine for this job
# tar: tar the logfiles together and delete the untared
# """
# # change to logdir to avoid nested tar-file
# current_dir = os.getcwd()
#
# logdir = CONFIG.get('logging','farmlogs')
# os.chdir(logdir)
# logs = glob("*.o%s.*" %str(jobnum))
#
# if not logs:
# tared_logs = glob("*.%i.tar" %jobnum)[0]
# Logger.info("Found a tared log for this job %s" %tared_logs)
# tared_logs = tarfile.TarFile(tared_logs)
# tared_logs.extractall()
# logs = glob("*.o%s.*" %str(jobnum))
#
# Logger.info("%i logs found for job %s" %(len(logs),str(jobnum)))
# # use unix grep, as it is faster
# #grepstring_finished = "fgrep finish %s/*.o%s.* | wc -l" %(logdir,str(jobnum))
# #grepstring_errors = "fgrep rror %s/*.o%s.* | wc -l" %(logdir,str(jobnum))
# #print shlex.split(grepstring_finished)
# #finished = Popen(shlex.split(grepstring_finished),stdout=PIPE).communicate()[0]
# #errors = Popen(shlex.split(grepstring_errors),stdout=PIPE).communicate()[0]
# #print finished,errors
# # look through the logs
# summary = join(logdir,"job%isummary.log" %jobnum)
# errors,unfinished,mem_errors,example_message,non_processed_files = grepfunc(logs,summaryfile=summary)
#
# Logger.info("Found %i jobs with errors" %len(errors))
# Logger.info("Found %i unfinished jobs" %len(unfinished))
# Logger.info("Found %i jobs with memory errors" %len(mem_errors))
#
# error_nums = [int(err.split(".")[-1]) for err in errors]
# unfinished_nums = [int(un .split(".")[-1]) for un in unfinished]
# mem_error_nums = [int(mem.split(".")[-1]) for mem in mem_errors]
# Logger.info("List of subjobs with errors %s" %error_nums)
# Logger.info("List of subjobs which are not finished %s" %unfinished_nums)
# Logger.info("List of subjobs with memory errors %s" %mem_error_nums)
# if tar:
# tarname = logs[0].split(".")[0] + ".%s" %jobnum.__repr__() + ".tar"
# TarFiles(logs,filename=tarname)
# map(os.remove,logs)
#
# # change back to previous dir
# os.chdir(current_dir)
#
# sgejobinfo = GetJobInfoSGEJobs(jobnum)
# mem_exceeded = []
# failed = []
# for thisjob in sgejobinfo:
# try:
# maxmem = int(thisjob["category"].split(",")[2].split("vmem")[1].rstrip("M"))
# usedmem = thisjob["maxvmem"]
# if usedmem > maxmem:
# mem_exceeded.append(int(thisjob["task_number"]))
# if int(thisjob["exit_status"] != 0):
# failed.append(int(thisjob["task_number"]))
# except Exception as e:
# Logger.debug("Exception %s arised during job check for database!" %e.__repr__())
#
# error_dict = dict()
# error_dict["mem"] = list(set(mem_error_nums + mem_exceeded))
# error_dict["unfin"] = list(set(unfinished_nums + failed))
# error_dict["error"] = error_nums
# error_dict["nonprfiles"] = non_processed_files
# return error_dict
#
#
#
###################################################
#
#def CheckDataSet(dataset,filetype="raw",usefarm=False):
# """
# Check if all files are fine with bzip -t
# """
#
# assert filetype in ["raw","l3a"], "Can only check for 'raw' or 'l3a' filetype"
#
# rmtmp = False
#
# if filetype == "raw":
# files = dataset.raw_files_on_disk
#
# if filetype == "l3a":
# files = dataset.l3afiles
#
# if usefarm:
# jobprops = JobProperties()
# jobprops.short_queue()
# thejob = ArrayJobFarmer(CheckBzipIntegrity,1,files,jobprops=jobprops,jobname="ch%i" %dataset.dataset)
# while CheckJobOnFarm(thejob):
# sleep(120)
# result = PostProcess(thejob,grepfunc=FailedBZipTestSearcher)
#
# else:
# corrupt = 0
# logfile = open("/afs/ifh.de/user/s/stoessl/scratch/" + "bztest%i" %dataset.dataset + ".log","w")
# for i in files:
# if (i.startswith("/acs") or i.startswith("dcap")):
# if i.startswith("dcap"):
# name = i.replace("dcap://","")
#
# command = "dccp %s /lustre/fs15/group/icecube/stoessl/tmp/" %name
# Popen(shlex.split(command),stdout=PIPE).communicate()[0]
# thefile = join("/lustre/fs15/group/icecube/stoessl/tmp/",split(i)[1])
# print command, thefile
# time.sleep(1)
# rmtmp = True
# else:
# thefile = i
# name = thefile
# print thefile
# #raise
# if not CheckBzipIntegrity(thefile):
# logfile.write("BZ2 Error " + i + "\n" )
# corrupt += 1
# if rmtmp:
# command = "rm /lustre/fs15/group/icecube/stoessl/tmp/%s" %split(name)[1]
# Popen(shlex.split(command),stdout=PIPE).communicate()[0]
#
# logfile.close()
# print len(files),"files"
# print corrupt,"corrupt"
# print "Done"
#
#
#
#####################################################
#
#def _parse_arcx_sgejobs(sgejobinfo):
#
# all_jobs = []
# cnt = 1
# jobinfo = dict()
# for line in sgejobinfo.split("\n"):
# data = line.split("=")
# data = (data[0].strip(),"".join(data[1:]).strip())
# jobinfo[data[0]] = data[1]
#        # each job has 29 fields of information
# cnt += 1
# if cnt == 30:
# all_jobs.append(copy(jobinfo))
# cnt = 1
# jobinfo = dict()
#
# return all_jobs
#
##########################################
#
#def _parse_jobinfo(qstatjobinfo):
# """
# parse the output of qstat -j
# """
#
# qstat_dict = dict()
# for line in qstatjobinfo.split("\n"):
# if ":" in line:
# data = line.split(":")
# if len(data) != 2:
# continue
# data = (data[0].strip(),data[1].strip())
# qstat_dict.update([data])
#
# return qstat_dict
#
################################################
#
#def CheckJobOnFarm(jobnum):
# x = GetJobinfo(jobnum)
# return len(x.keys()) > 0
#
#
################################################
#
#def GetJobinfo(jobnum):
# """
# call qstat -j jobnum and parse the output
# """
#
# command = "qstat -ext -j " + str(jobnum)
# qstatinfo = Popen(shlex.split(command),stdout=PIPE).communicate()[0]
# infodict = _parse_jobinfo(qstatinfo)
# return infodict
#
##################################
#
#def GetJobInfoSGEJobs(jobnum):
#
# command = "arcx sgejobs %s" %str(jobnum)
# sgejobinfo = Popen(shlex.split(command),stdout=PIPE).communicate()[0]
# infos = _parse_arcx_sgejobs(sgejobinfo)
# return infos
#
##################################
#
#def GetGCD():
# """
# Get some random gcd
# """
#
# raise NotImplementedError
#
#
#
##################################################
#
#class Shepherd(object):
# """
# Watch the sheep lingering on the beautiful farming grounds in Zeuthen,
# but don't fall asleep....
# """
#
# def __init__(self,fnct,fnct_kwargs,datasetlist,ana_level,maxjobs=20000,dbmanangertype=None,logfile=CONFIG.get("logging","shepherd_log"),port=59322,notify_on_port=None,delay=0):
# self.maxjobs = maxjobs
# self.datasets = datasetlist
# self.fnct = fnct
# self.delay = delay
# self.fnct_kwargs = fnct_kwargs
# self.ana_level = ana_level
# self.notify_on_port= notify_on_port
# self.running_jobs = []
# self.finished_jobs = []
# self.merge = 1
# fh = logging.FileHandler(logfile)
# Logger.addHandler(fh)
# Logger.info("Shepherd initialized!")
# Logger.info("Will process the following datasets %s" %self.datasets.__repr__())
# self.dbmanagertype = dbmanangertype
# context = zmq.Context()
# self.socket = context.socket(zmq.REP)
# self.socket.bind("tcp://127.0.0.1:%s" %str(port))
#
# #def alter_fnct_kwargs(self,fnct_kwargs):
# # self.fnct_kwargs = fnct_kwargs
#
# #def receive_dataset(self):
# # dataset = self.socket.recv()
# # dataset = int(dataset)
# # Logger.info("Got dataset %s" %dataset.__repr__())
# # return dataset
#
# def get_active_jobs(self):
# """
# Count the number of active jobs for this user
# """
# c_line = ['qstat', '-g','d','-u', 'stoessl']
# batch_status = Popen(c_line,stdout=PIPE)
# c_line_wc = ['wc', '-l']
# counter = Popen(c_line_wc,stdin=batch_status.stdout,stdout=PIPE)
# batch_status.stdout.close()
# jobsonfarm = int(counter.communicate()[0])
# batch_status.kill()
# Logger.debug("Got %i jobs currently on the batch system" %jobsonfarm)
# return jobsonfarm
#
#
# def submit_dataset(self,dataset,gcd = "/lustre/fs6/group/i3/stoessl/GeoCalibDetectorStatus_IC79.55380_L2a.i3",ram=4000,cpu="23",subjobs=[],jobname="L3afilter"):
# """
# Submit a dataset to the batch system
# """
#
#
#
# if int(dataset) in [6451,6939]:
# infiles = file_db.GetFilesFromDB( dataset=int(dataset),analysis_level=self.ana_level,coincident=1)
# else:
# infiles = file_db.GetFilesFromDB( dataset=int(dataset),analysis_level=self.ana_level)
# infiles = [ str(file.filename) for file in infiles]
# if len(infiles) > 10000:
# self.merge=10
# else:
# self.merge=1
#
# Logger.info("Will submit job for dataset %i with %i infiles, will merge %i files" %(int(dataset),len(infiles),int(self.merge)))
#
# self.fnct_kwargs.update([("merge",int(self.merge))])
# self.fnct_kwargs.update([("dataset",int(dataset))])
# #jobprops = JobPropertiesFactory("long_run_high_mem")
# jobprops = JobProperties()
# jobprops.request_ram(ram)
# jobprops.specify_hcpu(cpu,"59")
# jobid = 0
# if int(dataset) in [6451,6939]:
# # coincident + polygonato corsika needs double processing
# for __ in range(1):
# jobid = ArrayJobFarmer(self.fnct, int(self.merge), infiles, jobprops, jobname + str(dataset), func_kwargs= self.fnct_kwargs,subjobs=subjobs,delay=self.delay)
# self.fnct_kwargs.update([('coincident',True)])
# self.running_jobs.append(jobid)
# Logger.info("Submitted job %i" %(int(jobid)))
#
# else:
# jobid = ArrayJobFarmer(self.fnct, int(self.merge), infiles, jobprops, jobname + str(dataset), func_kwargs= self.fnct_kwargs,subjobs=subjobs,delay=self.delay)
# self.running_jobs.append(jobid)
# Logger.info("Submitted job %i" %(int(jobid)))
#
#
# return jobid
#
# def _getAFSToken(self):
# """
# Keep the job alive and get a new afs token,
# so that it can still write data to disk
# """
# Popen(['kinit', '-R'])
#
#
# #def add_dataset(self,dataset):
# # """
# # Add another dataset to the inline queue
# # """
# # self.datasets.append(dataset)
#
#
# def do_farming(self):
# """
# Submit and surveil all the datasets which are in
# self.datasets
# """
# #dbmanager = DBManager(managertype=self.dbmanagertype)
# Logger.info("Start to process jobs...")
# jobdict = dict()
#
# if not len(self.datasets):
# raise ValueError("No datasets available for farming!")
#
# while True:
#
# time.sleep(30)
# #maxjobsexceeded = False
# # receive new jobs
# #try:
# # ds = self.receive_dataset()
# # self.datasets.append(ds)
# # Logger.info("Received dataset %s to process"%ds.__repr__())
# #except Exception as e:
# # Logger.warning("Caught exception %s" %e.__repr__())
#
# self._getAFSToken()
#
# while len(self.datasets):
# if self.get_active_jobs() > self.maxjobs:
# #maxjobsexceeded = True
# break
#
# thisset = self.datasets.pop()
# jobid = self.submit_dataset(thisset)
# jobdict[jobid] = thisset
# self.running_jobs.append(jobid)
# time.sleep(5)
#
# for job in self.running_jobs:
# if not CheckJobOnFarm(job): # assume job is finished
# errors = dict()
# try:
# errors = PostProcess(job)
# except Exception as e:
# Logger.warn("Caught Exception %s" %e.__repr__())
# Logger.warn("Can not postprocess %s" %job.__repr__())
# newmem = 4000
# newcpu = "47"
# if errors:
# if errors["mem"]:
# newmem = 10000
#
# failed_jobs = errors["mem"] + errors["unfin"] + errors["error"]
# failed_jobs = list(set(failed_jobs))
# if failed_jobs:
# self.submit_dataset(thisset,ram=newmem, cpu=newcpu, subjobs=failed_jobs)
#
# else:
# # cc = False
# # thisset = jobdict[jobid]
# # if int(thisset) in [6451,6939]:
# # cc = True
# # dbmanager.append(thisset,cc)
# self.finished_jobs.append(job)
#
#
#
# #sendercontext = zmq.Context()
# #sendsocket = sendercontext.socket(zmq.REQ)
# #sendsocket.connect("tcp://127.0.0.1:%s" %str(self.notify_on_port))
# #sendsocket.send(str(thisset))
# # ping a another instance here and let it know that we are finished
#
#
# for job in self.finished_jobs:
# if job in self.running_jobs:
# self.running_jobs.remove(job)
#
#
# def shutdown(self):
# pass
#
###################################################
#
#class JobDBProxy(object):
# """
# Connect to a job-table and manage datastream
# """
#
# def __init__(self,dbfile=CONFIG.get("database","jobdb")):
# self.dbfile = dbfile
#
# def _re_initialize_tables(self,force=False):
# connection,cursor = OpenSQLiteDB(self.dbfile)
# if not force:
# raise ValueError("Need to force re-initialization!")
# #curs = SQLite()
# try:
# cursor.execute("DROP TABLE JOBS")
# except sqlite3.OperationalError as e:
# Logger.warning("Got sqlite3 error %s" %e.__repr__())
# cursor.execute("CREATE TABLE JOBS ( id INTEGER PRIMARY KEY AUTOINCREMENT,dataset int,jobid int)")
# CloseSQLiteDB(connection)
#
# def add_job(self,jobid,dataset):
# connection,cursor = OpenSQLiteDB(self.dbfile)
# cursor.execute("INSERT INTO JOBS (dataset,jobid) VALUES (?,?)" %(dataset,jobid))
# CloseSQLiteDB(connection)
#
# def get_job(self,jobid):
# connection,cursor = OpenSQLiteDB(self.dbfile)
# cursor.execute("SELECT * FROM JOBS WHERE jobid=%i" %(jobid))
# data = cursor.fetchall()
# CloseSQLiteDB(connection)
# return data
#
# def get_dataset(self,jobid):
# connection,cursor = OpenSQLiteDB(self.dbfile)
# cursor.execute("SELECT * FROM JOBS WHERE jobid=%i" %(jobid))
# data = cursor.fetchall()
# CloseSQLiteDB(connection)
# return data
#
#
#
#
#
#
#
#
| gpl-3.0 |
larsmans/scipy | scipy/stats/_discrete_distns.py | 6 | 21338 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy import special
from scipy.special import entr, gammaln as gamln
from scipy.misc import logsumexp
from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh
import numpy as np
from ._distn_infrastructure import (
rv_discrete, _lazywhere, _ncx2_pdf, _ncx2_cdf, get_distribution_names)
class binom_gen(rv_discrete):
    """A binomial discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `binom` is::

        binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)

    for ``k`` in ``{0, 1,..., n}``.

    `binom` takes ``n`` and ``p`` as shape parameters.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, n, p):
        return self._random_state.binomial(n, p, self._size)

    def _argcheck(self, n, p):
        # The upper bound of the support depends on the shape parameter n.
        self.b = n
        return (n >= 0) & (p >= 0) & (p <= 1)

    def _logpmf(self, x, n, p):
        k = floor(x)
        # Log of the binomial coefficient C(n, k) via log-gamma.
        log_binom_coeff = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1)))
        # xlogy / xlog1py handle the p == 0 and p == 1 corners cleanly.
        return log_binom_coeff + special.xlogy(k, p) + special.xlog1py(n-k, -p)

    def _pmf(self, x, n, p):
        return exp(self._logpmf(x, n, p))

    def _cdf(self, x, n, p):
        return special.bdtr(floor(x), n, p)

    def _sf(self, x, n, p):
        return special.bdtrc(floor(x), n, p)

    def _ppf(self, q, n, p):
        # Invert the CDF, then step back by one wherever the smaller
        # candidate already reaches the requested quantile.
        upper = ceil(special.bdtrik(q, n, p))
        lower = np.maximum(upper - 1, 0)
        return np.where(special.bdtr(lower, n, p) >= q, lower, upper)

    def _stats(self, n, p, moments='mv'):
        q = 1.0 - p
        mean = n * p
        variance = n * p * q
        skew = ((q - p) / sqrt(variance)) if 's' in moments else None
        kurt = ((1.0 - 6*p*q) / variance) if 'k' in moments else None
        return mean, variance, skew, kurt

    def _entropy(self, n, p):
        # Sum -pmf * log(pmf) over the whole support 0..n.
        support = np.arange(n + 1)
        return np.sum(entr(self._pmf(support, n, p)), axis=0)


binom = binom_gen(name='binom')
class bernoulli_gen(binom_gen):
    """A Bernoulli discrete random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `bernoulli` is::
    bernoulli.pmf(k) = 1-p if k = 0
    = p if k = 1
    for ``k`` in ``{0, 1}``.
    `bernoulli` takes ``p`` as shape parameter.
    %(after_notes)s
    %(example)s
    """
    # A Bernoulli variate is a binomial with n fixed at 1, so every method
    # delegates to `binom` (or to binom_gen for the sampler).
    def _rvs(self, p):
        return binom_gen._rvs(self, 1, p)
    def _argcheck(self, p):
        return (p >= 0) & (p <= 1)
    def _logpmf(self, x, p):
        return binom._logpmf(x, 1, p)
    def _pmf(self, x, p):
        return binom._pmf(x, 1, p)
    def _cdf(self, x, p):
        return binom._cdf(x, 1, p)
    def _sf(self, x, p):
        return binom._sf(x, 1, p)
    def _ppf(self, q, p):
        return binom._ppf(q, 1, p)
    def _stats(self, p):
        return binom._stats(1, p)
    def _entropy(self, p):
        # Closed form: H = -p*log(p) - (1-p)*log(1-p), via entr().
        return entr(p) + entr(1-p)
bernoulli = bernoulli_gen(b=1, name='bernoulli')
class nbinom_gen(rv_discrete):
    """A negative binomial discrete random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `nbinom` is::
    nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k
    for ``k >= 0``.
    `nbinom` takes ``n`` and ``p`` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, n, p):
        return self._random_state.negative_binomial(n, p, self._size)
    def _argcheck(self, n, p):
        # Unlike binom, non-integer n > 0 is allowed here.
        return (n > 0) & (p >= 0) & (p <= 1)
    def _pmf(self, x, n, p):
        return exp(self._logpmf(x, n, p))
    def _logpmf(self, x, n, p):
        # log of the generalized binomial coefficient
        # gamma(n+x) / (gamma(x+1) * gamma(n)).
        coeff = gamln(n+x) - gamln(x+1) - gamln(n)
        return coeff + n*log(p) + special.xlog1py(x, -p)
    def _cdf(self, x, n, p):
        k = floor(x)
        # CDF through the regularized incomplete beta function.
        return special.betainc(n, k+1, p)
    def _sf_skip(self, x, n, p):
        # skip because special.nbdtrc doesn't work for 0<n<1
        k = floor(x)
        return special.nbdtrc(k, n, p)
    def _ppf(self, q, n, p):
        # Invert the CDF, stepping back one where the smaller candidate
        # already reaches quantile q.
        vals = ceil(special.nbdtrik(q, n, p))
        vals1 = (vals-1).clip(0.0, np.inf)
        temp = self._cdf(vals1, n, p)
        return np.where(temp >= q, vals1, vals)
    def _stats(self, n, p):
        Q = 1.0 / p
        P = Q - 1.0
        mu = n*P
        var = n*P*Q
        g1 = (Q+P)/sqrt(n*P*Q)
        g2 = (1.0 + 6*P*Q) / (n*P*Q)
        return mu, var, g1, g2
nbinom = nbinom_gen(name='nbinom')
class geom_gen(rv_discrete):
    """A geometric discrete random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `geom` is::
    geom.pmf(k) = (1-p)**(k-1)*p
    for ``k >= 1``.
    `geom` takes ``p`` as shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, p):
        return self._random_state.geometric(p, size=self._size)
    def _argcheck(self, p):
        return (p <= 1) & (p >= 0)
    def _pmf(self, k, p):
        return np.power(1-p, k-1) * p
    def _logpmf(self, k, p):
        # xlog1py keeps the k == 1 case exact (0 * log(1-p) == 0).
        return special.xlog1py(k - 1, -p) + log(p)
    def _cdf(self, x, p):
        k = floor(x)
        # 1 - (1-p)**k computed via expm1/log1p for accuracy near p -> 0.
        return -expm1(log1p(-p)*k)
    def _sf(self, x, p):
        return np.exp(self._logsf(x, p))
    def _logsf(self, x, p):
        k = floor(x)
        return k*log1p(-p)
    def _ppf(self, q, p):
        # Analytic inverse, corrected down by one where possible.
        vals = ceil(log(1.0-q)/log(1-p))
        temp = self._cdf(vals-1, p)
        return np.where((temp >= q) & (vals > 0), vals-1, vals)
    def _stats(self, p):
        mu = 1.0/p
        qr = 1.0-p
        var = qr / p / p
        g1 = (2.0-p) / sqrt(qr)
        # Excess kurtosis: (p**2 - 6p + 6) / (1 - p) via polyval.
        g2 = np.polyval([1, -6, 6], p)/(1.0-p)
        return mu, var, g1, g2
geom = geom_gen(a=1, name='geom', longname="A geometric")
class hypergeom_gen(rv_discrete):
    """A hypergeometric discrete random variable.
    The hypergeometric distribution models drawing objects from a bin.
    M is the total number of objects, n is total number of Type I objects.
    The random variate represents the number of Type I objects in N drawn
    without replacement from the total population.
    %(before_notes)s
    Notes
    -----
    The probability mass function is defined as::
    pmf(k, M, n, N) = choose(n, k) * choose(M - n, N - k) / choose(M, N),
    for max(0, N - (M-n)) <= k <= min(n, N)
    %(after_notes)s
    Examples
    --------
    >>> from scipy.stats import hypergeom
    >>> import matplotlib.pyplot as plt
    Suppose we have a collection of 20 animals, of which 7 are dogs. Then if
    we want to know the probability of finding a given number of dogs if we
    choose at random 12 of the 20 animals, we can initialize a frozen
    distribution and plot the probability mass function:
    >>> [M, n, N] = [20, 7, 12]
    >>> rv = hypergeom(M, n, N)
    >>> x = np.arange(0, n+1)
    >>> pmf_dogs = rv.pmf(x)
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> ax.plot(x, pmf_dogs, 'bo')
    >>> ax.vlines(x, 0, pmf_dogs, lw=2)
    >>> ax.set_xlabel('# of dogs in our group of chosen animals')
    >>> ax.set_ylabel('hypergeom PMF')
    >>> plt.show()
    Instead of using a frozen distribution we can also use `hypergeom`
    methods directly. To for example obtain the cumulative distribution
    function, use:
    >>> prb = hypergeom.cdf(x, M, n, N)
    And to generate random numbers:
    >>> R = hypergeom.rvs(M, n, N, size=10)
    """
    def _rvs(self, M, n, N):
        return self._random_state.hypergeometric(n, M-n, N, size=self._size)
    def _argcheck(self, M, n, N):
        cond = (M > 0) & (n >= 0) & (N >= 0)
        cond &= (n <= M) & (N <= M)
        # Support bounds follow from the drawing constraints:
        # max(0, N-(M-n)) <= k <= min(n, N).
        self.a = max(N-(M-n), 0)
        self.b = min(n, N)
        return cond
    def _logpmf(self, k, M, n, N):
        # log of choose(n,k)*choose(M-n,N-k)/choose(M,N), fully expanded
        # into log-gamma terms for numerical stability.
        tot, good = M, n
        bad = tot - good
        return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \
            - gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \
            + gamln(N+1)
    def _pmf(self, k, M, n, N):
        # same as the following but numerically more precise
        # return comb(good, k) * comb(bad, N-k) / comb(tot, N)
        return exp(self._logpmf(k, M, n, N))
    def _stats(self, M, n, N):
        # tot, good, sample_size = M, n, N
        # "wikipedia".replace('N', 'M').replace('n', 'N').replace('K', 'n')
        M, n, N = 1.*M, 1.*n, 1.*N
        m = M - n
        p = n/M
        mu = N*p
        var = m*n*N*(M - N)*1.0/(M*M*(M-1))
        g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N)))
        g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m
        g2 *= (M-1)*M*M
        g2 += 6.*n*N*(M-N)*m*(5.*M-6)
        g2 /= n * N * (M-N) * m * (M-2.) * (M-3.)
        return mu, var, g1, g2
    def _entropy(self, M, n, N):
        # Exact entropy by summation over the whole (finite) support.
        k = np.r_[N - (M - n):min(n, N) + 1]
        vals = self.pmf(k, M, n, N)
        return np.sum(entr(vals), axis=0)
    def _sf(self, k, M, n, N):
        """More precise calculation, 1 - cdf doesn't cut it."""
        # This for loop is needed because `k` can be an array. If that's the
        # case, the sf() method makes M, n and N arrays of the same shape. We
        # therefore unpack all inputs args, so we can do the manual
        # integration.
        res = []
        for quant, tot, good, draw in zip(k, M, n, N):
            # Manual integration over probability mass function. More accurate
            # than integrate.quad.
            k2 = np.arange(quant + 1, draw + 1)
            res.append(np.sum(self._pmf(k2, tot, good, draw)))
        return np.asarray(res)
    def _logsf(self, k, M, n, N):
        """
        More precise calculation than log(sf)
        """
        res = []
        for quant, tot, good, draw in zip(k, M, n, N):
            # Integration over probability mass function using logsumexp
            k2 = np.arange(quant + 1, draw + 1)
            res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
        return np.asarray(res)
hypergeom = hypergeom_gen(name='hypergeom')
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
    """A Logarithmic (Log-Series, Series) discrete random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `logser` is::
    logser.pmf(k) = - p**k / (k*log(1-p))
    for ``k >= 1``.
    `logser` takes ``p`` as shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, p):
        # looks wrong for p>0.5, too few k=1
        # trying to use generic is worse, no k=1 at all
        return self._random_state.logseries(p, size=self._size)
    def _argcheck(self, p):
        return (p > 0) & (p < 1)
    def _pmf(self, k, p):
        return -np.power(p, k) * 1.0 / k / log(1 - p)
    def _stats(self, p):
        # Raw moments mu2p..mu4p are converted to central moments to get
        # the variance, skewness g1 and excess kurtosis g2.
        r = log(1 - p)
        mu = p / (p - 1.0) / r
        mu2p = -p / r / (p - 1.0)**2
        var = mu2p - mu*mu
        mu3p = -p / r * (1.0+p) / (1.0 - p)**3
        mu3 = mu3p - 3*mu*mu2p + 2*mu**3
        g1 = mu3 / np.power(var, 1.5)
        mu4p = -p / r * (
            1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4)
        mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
        g2 = mu4 / var**2 - 3.0
        return mu, var, g1, g2
logser = logser_gen(a=1, name='logser', longname='A logarithmic')
class poisson_gen(rv_discrete):
    """A Poisson discrete random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `poisson` is::
    poisson.pmf(k) = exp(-mu) * mu**k / k!
    for ``k >= 0``.
    `poisson` takes ``mu`` as shape parameter.
    %(after_notes)s
    %(example)s
    """
    # Override rv_discrete._argcheck to allow mu=0.
    def _argcheck(self, mu):
        return mu >= 0
    def _rvs(self, mu):
        return self._random_state.poisson(mu, self._size)
    def _logpmf(self, k, mu):
        # xlogy(k, mu) returns 0 for k == 0 even when mu == 0.
        Pk = special.xlogy(k, mu) - gamln(k + 1) - mu
        return Pk
    def _pmf(self, k, mu):
        return exp(self._logpmf(k, mu))
    def _cdf(self, x, mu):
        k = floor(x)
        # pdtr/pdtrc: Poisson CDF / survival function from scipy.special.
        return special.pdtr(k, mu)
    def _sf(self, x, mu):
        k = floor(x)
        return special.pdtrc(k, mu)
    def _ppf(self, q, mu):
        # Invert with pdtrik, stepping back one where the smaller
        # candidate already reaches quantile q.
        vals = ceil(special.pdtrik(q, mu))
        vals1 = np.maximum(vals - 1, 0)
        temp = special.pdtr(vals1, mu)
        return np.where(temp >= q, vals1, vals)
    def _stats(self, mu):
        var = mu
        tmp = np.asarray(mu)
        mu_nonzero = tmp > 0
        # _lazywhere avoids evaluating 1/mu at mu == 0; g1/g2 -> inf there.
        g1 = _lazywhere(mu_nonzero, (tmp,), lambda x: sqrt(1.0/x), np.inf)
        g2 = _lazywhere(mu_nonzero, (tmp,), lambda x: 1.0/x, np.inf)
        return mu, var, g1, g2
poisson = poisson_gen(name="poisson", longname='A Poisson')
class planck_gen(rv_discrete):
    """A Planck discrete exponential random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `planck` is::
    planck.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)
    for ``k*lambda_ >= 0``.
    `planck` takes ``lambda_`` as shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, lambda_):
        # The sign of lambda_ selects the support: [0, inf) for positive,
        # (-inf, 0] for negative; lambda_ == 0 is invalid.
        if (lambda_ > 0):
            self.a = 0
            self.b = np.inf
            return 1
        elif (lambda_ < 0):
            self.a = -np.inf
            self.b = 0
            return 1
        else:
            return 0
    def _pmf(self, k, lambda_):
        fact = (1-exp(-lambda_))
        return fact*exp(-lambda_*k)
    def _cdf(self, x, lambda_):
        k = floor(x)
        return 1-exp(-lambda_*(k+1))
    def _ppf(self, q, lambda_):
        # Analytic inverse of the CDF, corrected down by one if possible.
        vals = ceil(-1.0/lambda_ * log1p(-q)-1)
        vals1 = (vals-1).clip(self.a, np.inf)
        temp = self._cdf(vals1, lambda_)
        return np.where(temp >= q, vals1, vals)
    def _stats(self, lambda_):
        mu = 1/(exp(lambda_)-1)
        var = exp(-lambda_)/(expm1(-lambda_))**2
        g1 = 2*cosh(lambda_/2.0)
        g2 = 4+2*cosh(lambda_)
        return mu, var, g1, g2
    def _entropy(self, lambda_):
        l = lambda_
        C = (1-exp(-l))
        return l*exp(-l)/C - log(C)
planck = planck_gen(name='planck', longname='A discrete exponential ')
class boltzmann_gen(rv_discrete):
    """A Boltzmann (Truncated Discrete Exponential) random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `boltzmann` is::
    boltzmann.pmf(k) = (1-exp(-lambda_)*exp(-lambda_*k)/(1-exp(-lambda_*N))
    for ``k = 0,..., N-1``.
    `boltzmann` takes ``lambda_`` and ``N`` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    # Same exponential shape as `planck`, truncated to k = 0..N-1, hence
    # the additional (1 - exp(-lambda_*N)) normalization factor.
    def _pmf(self, k, lambda_, N):
        fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
        return fact*exp(-lambda_*k)
    def _cdf(self, x, lambda_, N):
        k = floor(x)
        return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
    def _ppf(self, q, lambda_, N):
        # Rescale q to the untruncated distribution, invert analytically,
        # then correct down by one where possible.
        qnew = q*(1-exp(-lambda_*N))
        vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
        vals1 = (vals-1).clip(0.0, np.inf)
        temp = self._cdf(vals1, lambda_, N)
        return np.where(temp >= q, vals1, vals)
    def _stats(self, lambda_, N):
        z = exp(-lambda_)
        zN = exp(-lambda_*N)
        mu = z/(1.0-z)-N*zN/(1-zN)
        var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
        trm = (1-zN)/(1-z)
        trm2 = (z*trm**2 - N*N*zN)
        g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
        g1 = g1 / trm2**(1.5)
        g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
        g2 = g2 / trm2 / trm2
        return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann',
                          longname='A truncated discrete exponential ')
class randint_gen(rv_discrete):
    """A uniform discrete random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `randint` is::
    randint.pmf(k) = 1./(high - low)
    for ``k = low, ..., high - 1``.
    `randint` takes ``low`` and ``high`` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, low, high):
        # Support is the half-open integer range [low, high).
        self.a = low
        self.b = high - 1
        return (high > low)
    def _pmf(self, k, low, high):
        p = np.ones_like(k) / (high - low)
        return np.where((k >= low) & (k < high), p, 0.)
    def _cdf(self, x, low, high):
        k = floor(x)
        return (k - low + 1.) / (high - low)
    def _ppf(self, q, low, high):
        # Direct inverse, corrected down by one where possible.
        vals = ceil(q * (high - low) + low) - 1
        vals1 = (vals - 1).clip(low, high)
        temp = self._cdf(vals1, low, high)
        return np.where(temp >= q, vals1, vals)
    def _stats(self, low, high):
        m2, m1 = np.asarray(high), np.asarray(low)
        mu = (m2 + m1 - 1.0) / 2
        d = m2 - m1
        var = (d*d - 1) / 12.0
        g1 = 0.0
        g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0)
        return mu, var, g1, g2
    def _rvs(self, low, high=None):
        """An array of *size* random integers >= ``low`` and < ``high``.
        If ``high`` is ``None``, then range is >=0 and < low
        """
        return self._random_state.randint(low, high, self._size)
    def _entropy(self, low, high):
        # Uniform over (high - low) points: H = log(high - low).
        return log(high - low)
randint = randint_gen(name='randint', longname='A discrete uniform '
                      '(random integer)')
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
    """A Zipf discrete random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `zipf` is::
    zipf.pmf(k, a) = 1/(zeta(a) * k**a)
    for ``k >= 1``.
    `zipf` takes ``a`` as shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, a):
        return self._random_state.zipf(a, size=self._size)
    def _argcheck(self, a):
        return a > 1
    def _pmf(self, k, a):
        # Normalization by the Riemann zeta function zeta(a).
        Pk = 1.0 / special.zeta(a, 1) / k**a
        return Pk
    def _munp(self, n, a):
        # n-th raw moment is finite only for a > n + 1; inf otherwise.
        return _lazywhere(
            a > n + 1, (a, n),
            lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1),
            np.inf)
zipf = zipf_gen(a=1, name='zipf', longname='A Zipf')
class dlaplace_gen(rv_discrete):
    """A Laplacian discrete random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `dlaplace` is::
    dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k))
    for ``a > 0``.
    `dlaplace` takes ``a`` as shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _pmf(self, k, a):
        return tanh(a/2.0) * exp(-a * abs(k))
    def _cdf(self, x, a):
        k = floor(x)
        # Separate closed forms for the non-negative (f) and negative (f2)
        # branch, selected lazily to avoid overflow on the unused side.
        f = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1)
        f2 = lambda k, a: exp(a * (k+1)) / (exp(a) + 1)
        return _lazywhere(k >= 0, (k, a), f=f, f2=f2)
    def _ppf(self, q, a):
        # Pick the inversion branch depending on which side of the median
        # q falls, then correct down by one where possible.
        const = 1 + exp(a)
        vals = ceil(np.where(q < 1.0 / (1 + exp(-a)), log(q*const) / a - 1,
                             -log((1-q) * const) / a))
        vals1 = vals - 1
        return np.where(self._cdf(vals1, a) >= q, vals1, vals)
    def _stats(self, a):
        # Symmetric about 0: mean and skewness are exactly zero; excess
        # kurtosis from the 4th and 2nd central moments.
        ea = exp(a)
        mu2 = 2.*ea/(ea-1.)**2
        mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4
        return 0., mu2, 0., mu4/mu2**2 - 3.
    def _entropy(self, a):
        return a / sinh(a) - log(tanh(a/2.0))
dlaplace = dlaplace_gen(a=-np.inf,
                        name='dlaplace', longname='A discrete Laplacian')
class skellam_gen(rv_discrete):
    """A Skellam discrete random variable.
    %(before_notes)s
    Notes
    -----
    Probability distribution of the difference of two correlated or
    uncorrelated Poisson random variables.
    Let k1 and k2 be two Poisson-distributed r.v. with expected values
    lam1 and lam2. Then, ``k1 - k2`` follows a Skellam distribution with
    parameters ``mu1 = lam1 - rho*sqrt(lam1*lam2)`` and
    ``mu2 = lam2 - rho*sqrt(lam1*lam2)``, where rho is the correlation
    coefficient between k1 and k2. If the two Poisson-distributed r.v.
    are independent then ``rho = 0``.
    Parameters mu1 and mu2 must be strictly positive.
    For details see: http://en.wikipedia.org/wiki/Skellam_distribution
    `skellam` takes ``mu1`` and ``mu2`` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, mu1, mu2):
        # Difference of two independent Poisson samples.
        n = self._size
        return (self._random_state.poisson(mu1, n) -
                self._random_state.poisson(mu2, n))
    def _pmf(self, x, mu1, mu2):
        # pmf expressed through the noncentral chi-square density
        # (_ncx2_pdf), with separate forms for x < 0 and x >= 0.
        px = np.where(x < 0,
                      _ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2,
                      _ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2)
        # ncx2.pdf() returns nan's for extremely low probabilities
        return px
    def _cdf(self, x, mu1, mu2):
        # CDF through the noncentral chi-square CDF (_ncx2_cdf).
        x = floor(x)
        px = np.where(x < 0,
                      _ncx2_cdf(2*mu2, -2*x, 2*mu1),
                      1-_ncx2_cdf(2*mu1, 2*(x+1), 2*mu2))
        return px
    def _stats(self, mu1, mu2):
        mean = mu1 - mu2
        var = mu1 + mu2
        g1 = mean / sqrt((var)**3)
        g2 = 1 / var
        return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam')
# Collect names of classes and objects in this module.
# get_distribution_names splits the module namespace into the public frozen
# instance names and the corresponding *_gen class names; together they
# form the module's public API.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)
__all__ = _distn_names + _distn_gen_names
| bsd-3-clause |
loafbaker/django_ecommerce1 | orders/migrations/0004_auto__chg_field_order_billing_address__chg_field_order_shipping_addres.py | 1 | 8620 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Relax both Order address foreign keys to allow NULL values."""
        # Changing field 'Order.billing_address'
        db.alter_column(u'orders_order', 'billing_address_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['accounts.UserAddress']))
        # Changing field 'Order.shipping_address'
        db.alter_column(u'orders_order', 'shipping_address_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['accounts.UserAddress']))
    def backwards(self, orm):
        """Irreversible: raises immediately because NULL addresses created
        after this migration cannot be restored to NOT NULL columns.
        The alter_column calls below are unreachable; South emits them only
        as a template for hand-writing a reverse migration."""
        # User chose to not deal with backwards NULL issues for 'Order.billing_address'
        raise RuntimeError("Cannot reverse this migration. 'Order.billing_address' and its values cannot be restored.")
        # The following code is provided here to aid in writing a correct migration
        # Changing field 'Order.billing_address'
        db.alter_column(u'orders_order', 'billing_address_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounts.UserAddress']))
        # User chose to not deal with backwards NULL issues for 'Order.shipping_address'
        raise RuntimeError("Cannot reverse this migration. 'Order.shipping_address' and its values cannot be restored.")
        # The following code is provided here to aid in writing a correct migration
        # Changing field 'Order.shipping_address'
        db.alter_column(u'orders_order', 'shipping_address_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounts.UserAddress']))
models = {
u'accounts.useraddress': {
'Meta': {'ordering': "['-updated', '-timestamp']", 'object_name': 'UserAddress'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'address2': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'billing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'carts.cart': {
'Meta': {'object_name': 'Cart'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'total': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '100', 'decimal_places': '2'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'orders.order': {
'Meta': {'object_name': 'Order'},
'billing_address': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'billing_address'", 'null': 'True', 'to': u"orm['accounts.UserAddress']"}),
'cart': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['carts.Cart']"}),
'final_total': ('django.db.models.fields.DecimalField', [], {'default': "'10.99'", 'max_digits': '1000', 'decimal_places': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_id': ('django.db.models.fields.CharField', [], {'default': "'ABC'", 'unique': 'True', 'max_length': '120'}),
'shipping_address': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'shipping_address'", 'null': 'True', 'to': u"orm['accounts.UserAddress']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Started'", 'max_length': '120'}),
'sub_total': ('django.db.models.fields.DecimalField', [], {'default': "'10.99'", 'max_digits': '1000', 'decimal_places': '2'}),
'tax_total': ('django.db.models.fields.DecimalField', [], {'default': "'0.99'", 'max_digits': '1000', 'decimal_places': '2'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['orders'] | mit |
SteadfastInnovation/libhpdf-AndroidLibrary | library/jni/libhpdf-2.3.0RC2/if/python/demo/jpfont_demo.py | 32 | 6168 | ###
## * << Haru Free PDF Library 2.0.0 >> -- jpfont_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this script to ``sys.path``.

    The previous implementation concatenated the literal ``'\\..'`` which
    only forms a valid path on Windows; building the path with
    ``os.path.join`` and ``os.pardir`` works on every platform and yields
    the same normalized directory.
    """
    here = os.path.split(os.path.realpath(__file__))[0]
    # climb `up` directory levels in a platform-independent way
    path = os.path.normpath(os.path.join(here, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    # ctypes callback installed into libhpdf: report the error in human
    # readable form, free the global document and abort the demo.
    global pdf
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
            detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
def main():
    """Render one PDF page per Japanese font variant, each page showing a
    Shift-JIS sample text at several point sizes, with one outline entry
    per font. The output file is named after this script with a .pdf
    suffix. Returns 0 on success, 1 on error."""
    global pdf
    detail_font=[None for i in range(16)]
    PAGE_HEIGHT = 210
    try:
        f = open ("mbtext/sjis.txt", "rb")
    except:
        printf ("error: cannot open 'mbtext/sjis.txt'\n")
        return 1
    # at most 2 KiB of Shift-JIS encoded sample text
    samp_text=f.read(2048)
    f.close ()
    # output file: same basename as this script, extension replaced by .pdf
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # configure pdf-document to be compressed.
    HPDF_SetCompressionMode (pdf, HPDF_COMP_ALL)
    # declaration for using Japanese font, encoding.
    HPDF_UseJPEncodings (pdf)
    HPDF_UseJPFonts (pdf)
    # 16 variants: {Mincyo, PMincyo, Gothic, PGothic} x
    # {regular, Bold, Italic, BoldItalic}, proportional fonts ("P...")
    # with the 90msp-RKSJ-H encoding, fixed-pitch with 90ms-RKSJ-H.
    detail_font[0] = HPDF_GetFont (pdf, "MS-Mincyo", "90ms-RKSJ-H")
    detail_font[1] = HPDF_GetFont (pdf, "MS-Mincyo,Bold", "90ms-RKSJ-H")
    detail_font[2] = HPDF_GetFont (pdf, "MS-Mincyo,Italic", "90ms-RKSJ-H")
    detail_font[3] = HPDF_GetFont (pdf, "MS-Mincyo,BoldItalic", "90ms-RKSJ-H")
    detail_font[4] = HPDF_GetFont (pdf, "MS-PMincyo", "90msp-RKSJ-H")
    detail_font[5] = HPDF_GetFont (pdf, "MS-PMincyo,Bold", "90msp-RKSJ-H")
    detail_font[6] = HPDF_GetFont (pdf, "MS-PMincyo,Italic", "90msp-RKSJ-H")
    detail_font[7] = HPDF_GetFont (pdf, "MS-PMincyo,BoldItalic",
            "90msp-RKSJ-H")
    detail_font[8] = HPDF_GetFont (pdf, "MS-Gothic", "90ms-RKSJ-H")
    detail_font[9] = HPDF_GetFont (pdf, "MS-Gothic,Bold", "90ms-RKSJ-H")
    detail_font[10] = HPDF_GetFont (pdf, "MS-Gothic,Italic", "90ms-RKSJ-H")
    detail_font[11] = HPDF_GetFont (pdf, "MS-Gothic,BoldItalic", "90ms-RKSJ-H")
    detail_font[12] = HPDF_GetFont (pdf, "MS-PGothic", "90msp-RKSJ-H")
    detail_font[13] = HPDF_GetFont (pdf, "MS-PGothic,Bold", "90msp-RKSJ-H")
    detail_font[14] = HPDF_GetFont (pdf, "MS-PGothic,Italic", "90msp-RKSJ-H")
    detail_font[15] = HPDF_GetFont (pdf, "MS-PGothic,BoldItalic",
            "90msp-RKSJ-H")
    # Set page mode to use outlines.
    HPDF_SetPageMode(pdf, HPDF_PAGE_MODE_USE_OUTLINE)
    # create outline root.
    root = HPDF_CreateOutline (pdf, NULL, "JP font demo", NULL)
    HPDF_Outline_SetOpened (root, HPDF_TRUE)
    for i in range(16):
        # add a new page object.
        page = HPDF_AddPage (pdf)
        # create outline entry
        outline = HPDF_CreateOutline (pdf, root,
                HPDF_Font_GetFontName (detail_font[i]), NULL)
        dst = HPDF_Page_CreateDestination (page)
        HPDF_Outline_SetDestination(outline, dst)
        # page title: the font name in Helvetica
        title_font = HPDF_GetFont (pdf, "Helvetica", NULL)
        HPDF_Page_SetFontAndSize (page, title_font, 10)
        HPDF_Page_BeginText (page)
        # move the position of the text to top of the page.
        HPDF_Page_MoveTextPos(page, 10, 190)
        HPDF_Page_ShowText (page, HPDF_Font_GetFontName (detail_font[i]))
        # ASCII reference lines in the font under test
        HPDF_Page_SetFontAndSize (page, detail_font[i], 15)
        HPDF_Page_MoveTextPos (page, 10, -20)
        HPDF_Page_ShowText (page, "abcdefghijklmnopqrstuvwxyz")
        HPDF_Page_MoveTextPos (page, 0, -20)
        HPDF_Page_ShowText (page, "ABCDEFGHIJKLMNOPQRSTUVWXYZ")
        HPDF_Page_MoveTextPos (page, 0, -20)
        HPDF_Page_ShowText (page, "1234567890")
        HPDF_Page_MoveTextPos (page, 0, -20)
        # sample text repeated at increasing sizes: 10, 16, 23, 30 pt
        HPDF_Page_SetFontAndSize (page, detail_font[i], 10)
        HPDF_Page_ShowText (page, samp_text)
        HPDF_Page_MoveTextPos (page, 0, -18)
        HPDF_Page_SetFontAndSize (page, detail_font[i], 16)
        HPDF_Page_ShowText (page, samp_text)
        HPDF_Page_MoveTextPos (page, 0, -27)
        HPDF_Page_SetFontAndSize (page, detail_font[i], 23)
        HPDF_Page_ShowText (page, samp_text)
        HPDF_Page_MoveTextPos (page, 0, -36)
        HPDF_Page_SetFontAndSize (page, detail_font[i], 30)
        HPDF_Page_ShowText (page, samp_text)
        p = HPDF_Page_GetCurrentTextPos (page)
        # finish to print text.
        HPDF_Page_EndText (page)
        # short tick marks every 30 units below the last sample line
        HPDF_Page_SetLineWidth (page, 0.5)
        x_pos = 20
        for j in range(len (samp_text) // 2):
            HPDF_Page_MoveTo (page, x_pos, p.y - 10)
            HPDF_Page_LineTo (page, x_pos, p.y - 12)
            HPDF_Page_Stroke (page)
            x_pos = x_pos + 30
        # shrink the page to the text width and draw horizontal rules
        HPDF_Page_SetWidth (page, p.x + 20)
        HPDF_Page_SetHeight (page, PAGE_HEIGHT)
        HPDF_Page_MoveTo (page, 10, PAGE_HEIGHT - 25)
        HPDF_Page_LineTo (page, p.x + 10, PAGE_HEIGHT - 25)
        HPDF_Page_Stroke (page)
        HPDF_Page_MoveTo (page, 10, PAGE_HEIGHT - 85)
        HPDF_Page_LineTo (page, p.x + 10, PAGE_HEIGHT - 85)
        HPDF_Page_Stroke (page)
        HPDF_Page_MoveTo (page, 10, p.y - 12)
        HPDF_Page_LineTo (page, p.x + 10, p.y - 12)
        HPDF_Page_Stroke (page)
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
main() | apache-2.0 |
theadviceio/executer | tests/utils/test_utils_errors.py | 1 | 2265 | import sys
import os
import unittest
import tempfile
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__)) + "/../../"))
import utils.errors
# logname = "/tmp/test.log"
# print("-----------------------------")
# print((" current logs in %s" % logname))
# print("-----------------------------")
__version__ = 0.1
__author__ = 'weldpua2008@gmail.com'
def get_temp_filename(deleted=False):
    """Create a temporary file and return its path.

    Parameters
    ----------
    deleted : bool
        Passed through as ``delete=`` to ``NamedTemporaryFile``; when True
        the file is removed as soon as the handle is closed, so the
        returned name points to a file that no longer exists.

    Returns
    -------
    str
        Name of the created temporary file (prefix ``_rde_logtmp``).
    """
    # Close the handle deterministically instead of leaking the open file
    # object (the original left the close commented out and relied on GC).
    with tempfile.NamedTemporaryFile(delete=deleted, prefix='_rde_logtmp') as tmp:
        return tmp.name
class TestUtilsErrorsClass(unittest.TestCase):
    """Checks that each helper in utils.errors raises the exception type
    its name promises when given an arbitrary error string."""
    def test_get_runtime_error(self):
        logged_string = "sssssssASdAadDASdasD"
        with self.assertRaises(RuntimeError):
            utils.errors.get_runtime_error(error=logged_string)
    def test_get_io_error(self):
        logged_string = "get_io_error_sssssssASdAadDASdasD"
        with self.assertRaises(IOError):
            utils.errors.get_io_error(error=logged_string)
    def test_get_notimplemented_error(self):
        logged_string = "get_notimplemented_error_sssssssASdAadDASdasD"
        with self.assertRaises(NotImplementedError):
            utils.errors.get_notimplemented_error(error=logged_string)
    def test_get_transition_error(self):
        # TransitionError is the project's own exception class.
        logged_string = "get_transition_error_sssssssASdAadDASdasD"
        with self.assertRaises(utils.errors.TransitionError):
            utils.errors.get_transition_error(error=logged_string)
    def test_get_key_error(self):
        logged_string = "get_key_error_error_sssssssASdAadDASdasD"
        with self.assertRaises(KeyError):
            utils.errors.get_key_error(error=logged_string)
    def test_get_type_error(self):
        logged_string = "get_type_error_sssssssASdAadDASdasD"
        with self.assertRaises(TypeError):
            utils.errors.get_type_error(error=logged_string)
    def test_get_value_error(self):
        logged_string = "get_value_error_sssssssASdAadDASdasD"
        with self.assertRaises(ValueError):
            utils.errors.get_value_error(error=logged_string)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
holgerBerger/go_ludalo | top/top.py | 1 | 13095 | #!/usr/bin/env python
#
# indices used:
#
# use goludalo
# db.<fs>.createIndex({"ts":1, "nid":1})
#
# use ludalo
# db.jobs.createIndex({"start":1})
# db.jobs.createIndex({"end":1})
# db.jobs.createIndex({"jobid":1})
import time,sys
# CONFIG
DBHOST="localhost"
SNAP = 5
PERFDB="goludalo"
JOBDB="ludalo"
JOBCOLLECTION="jobs"
# map filesystems to batchservers to skip some DB queries
batchservermap={
"nobnec":"intern2",
"alnec":"intern3"
}
batchskip=True # set to false if skipping map should not be used
# END CONFIG
import pymongo
# round current time to snap
def getCurrentSnapTime():
    """Return the current Unix time rounded down to a multiple of SNAP.

    Uses floor division so the result stays an integer on both Python 2
    and Python 3 (plain ``/`` would produce a float, un-snapped result
    under Python 3).
    """
    return (int(time.time()) // SNAP) * SNAP
# nodestats object, represents a node and its IO statistics
class nodestats(object):
    """Per-node Lustre IO counters: metadata ops, read/write ops and
    read/write bandwidth, plus the sample interval dt."""

    # counter attributes that start out at zero for a fresh node
    _COUNTERS = ("miops", "wiops", "wbw", "riops", "rbw", "dt")

    def __init__(self, nodename):
        self.nodename = nodename
        for counter in self._COUNTERS:
            setattr(self, counter, 0)

    def __repr__(self):
        fields = (self.nodename, self.miops, self.wiops,
                  self.wbw, self.riops, self.rbw)
        return "%s: [%d,%d,%d,%d,%d]" % fields
# jobstats object, represents a node and its IO statistics
class jobstats(object):
    """Aggregated IO statistics for one batch job (or a job-less node)."""

    def __init__(self, jobid):
        self.jobid = jobid
        # IO counters, accumulated via addnode()
        self.miops = 0
        self.wiops = 0
        self.wbw = 0
        self.riops = 0
        self.rbw = 0
        # bookkeeping, filled in from the job record by the callers
        self.nodelist = []
        self.start = 0
        self.end = 0
        self.owner = ""
        self.cmd = ""
        self.dt = 1

    def addnode(self, node):
        """Fold the IO counters of *node* into this job; adopt the node's dt."""
        for counter in ("miops", "wiops", "wbw", "riops", "rbw"):
            setattr(self, counter, getattr(self, counter) + getattr(node, counter))
        self.dt = node.dt

    def __repr__(self):
        counters = (self.miops, self.wiops, self.wbw, self.riops, self.rbw)
        return "%s: %s [%d,%d,%d,%d,%d]" % ((self.jobid, self.owner) + counters)
# filesystem object, containing mongo connections
class filesystem(object):
    """One monitored Lustre filesystem.

    Wraps the MongoDB collections holding the performance samples
    (PERFDB.<fsname>) and the batch job records (JOBDB.jobs) and offers
    query helpers on top of them.
    """

    def __init__(self, server, fsname):
        self.fsname = fsname
        self.client = pymongo.MongoClient(server)
        self.perfdb = self.client[PERFDB]
        self.perfcoll = self.perfdb[fsname]
        self.jobdb = self.client[JOBDB]
        self.jobcoll = self.jobdb[JOBCOLLECTION]
        # Create the maxima collection handle up front so writeFSMaxima()
        # also works when readFSMaxima() was never called before it
        # (previously that order raised AttributeError).
        self.maxcoll = self.perfdb["fsmaxima"]

    def getLatestTs(self):
        """Return the newest sample timestamp, searching 5 minutes into the past."""
        latest = self.perfcoll.find({"ts": {"$gt": getCurrentSnapTime() - 300}}).sort("ts", pymongo.DESCENDING)[0][u'ts']
        return latest

    def getEntries(self, timestamp):
        """Yield all sample documents recorded at *timestamp*."""
        for p in self.perfcoll.find({"ts": timestamp}):
            yield p

    def currentNodesstats(self):
        """Return (timestamp, {nodename: nodestats}) for all nodes doing IO
        at the latest sample time; "aggr" pseudo entries are skipped."""
        nodes = {}
        timestamp = self.getLatestTs()
        for e in self.getEntries(timestamp):
            node = e["nid"]
            if node == "aggr":
                continue
            if node not in nodes:
                nodes[node] = nodestats(node)
            nodes[node].dt = e['dt']
            if 'mdt' in e:
                # metadata target sample: 'v' is a plain counter
                nodes[node].miops += e['v']
            elif 'ost' in e:
                # object storage target sample: 'v' is [wiops, wbw, riops, rbw]
                nodes[node].wiops += e['v'][0]
                nodes[node].wbw += e['v'][1]
                nodes[node].riops += e['v'][2]
                nodes[node].rbw += e['v'][3]
        return (timestamp, nodes)

    def mapNodesToJobs(self, timestamp, nodes):
        """Map a dict of nodestats onto the jobs running at *timestamp*.

        Returns {jobid: jobstats} restricted to jobs that actually had
        nodes doing IO; a node without a matching job gets a pseudo job
        named after the node itself.
        """
        # Job document example:
        # { "_id" : ObjectId(...), "end" : 1436430906, "cmd" : "",
        #   "jobid" : "659096.intern2-2015", "nids" : "n151001",
        #   "start" : 1436425679, "owner" : "ppb742", "calc" : -1 }
        # FIXME -1 = running
        jobs = {}
        nidtojob = {}
        for j in self.jobcoll.find({"$and": [{"end": -1}, {"start": {"$lt": timestamp}}]}):
            jobid = j["jobid"]
            if jobid not in jobs:
                jobs[jobid] = jobstats(jobid)
            for nid in j["nids"].split(","):
                nidtojob[nid] = jobid
                jobs[jobid].nodelist.append(nid)
            jobs[jobid].start = j["start"]
            jobs[jobid].end = j["end"]
            jobs[jobid].owner = j["owner"]
            jobs[jobid].cmd = j["cmd"]
        # joblist contains now list of jobs of ALL clusters!!!
        fsjobs = set()
        for node in nodes:
            try:
                job = nidtojob[node]
                jobs[job].addnode(nodes[node])
                if job not in fsjobs:
                    fsjobs.add(job)
            except KeyError:
                # node does not belong to any known job -> pseudo job
                jobs[node] = jobstats(node)
                jobs[node].addnode(nodes[node])
                jobs[node].nodelist.append(node)
                if node not in fsjobs:
                    fsjobs.add(node)
        localjobs = {}
        for j in fsjobs:
            localjobs[j] = jobs[j]
        return localjobs

    def getOneRunningJob(self, jobid):
        """Return a jobstats for *jobid* (cached counters if present), or None.

        NOTE(review): the query has no "end" filter, so a finished job with
        this id is returned as well -- confirm callers only pass running ids.
        """
        j = self.jobcoll.find_one({"jobid": jobid})
        if j is None:
            return None
        job = jobstats(jobid)
        for nid in j["nids"].split(","):
            job.nodelist.append(nid)
        job.start = j["start"]
        job.end = j["end"]
        job.owner = j["owner"]
        job.cmd = j["cmd"]
        try:
            job.cachets = j["cachets"]
            job.miops = j["miops"]
            job.wiops = j["wiops"]
            job.wbw = j["wbw"]
            job.riops = j["riops"]
            job.rbw = j["rbw"]
        except KeyError:
            # no cached data for this job
            job.cachets = job.start
            job.miops = 0
            job.wiops = 0
            job.wbw = 0
            job.riops = 0
            job.rbw = 0
        return job

    def getRunningJobs(self):
        """Return {jobid: jobstats} for all running jobs (end == -1).

        The jobs collection contains jobs of all clusters; when batchskip
        is set, jobs whose id does not contain this filesystem's batch
        server name are skipped.
        """
        jobs = {}
        for j in self.jobcoll.find({"end": -1}):
            jobid = j["jobid"]
            if batchskip and jobid.find(batchservermap[self.fsname]) < 0:
                continue
            if jobid not in jobs:
                jobs[jobid] = jobstats(jobid)
            for nid in j["nids"].split(","):
                jobs[jobid].nodelist.append(nid)
            jobs[jobid].start = j["start"]
            jobs[jobid].end = j["end"]
            jobs[jobid].owner = j["owner"]
            jobs[jobid].cmd = j["cmd"]
            try:
                jobs[jobid].cachets = j["cachets"]
                jobs[jobid].miops = j["miops"]
                jobs[jobid].wiops = j["wiops"]
                jobs[jobid].wbw = j["wbw"]
                jobs[jobid].riops = j["riops"]
                jobs[jobid].rbw = j["rbw"]
            except KeyError:
                # no cached data for this job
                jobs[jobid].cachets = jobs[jobid].start
                jobs[jobid].miops = 0
                jobs[jobid].wiops = 0
                jobs[jobid].wbw = 0
                jobs[jobid].riops = 0
                jobs[jobid].rbw = 0
        return jobs

    def accumulateJobStats(self, jobs):
        """Sum the per-node samples of each job from its (cached) start to
        its end into the job's counters, update the cache in the jobs
        collection, and return only the jobs of this filesystem.

        end == -1 (still running) is covered by substituting the current time.
        """
        fsjobs = set()
        for j in jobs:
            if batchskip and j.find(batchservermap[self.fsname]) < 0:
                continue
            if jobs[j].end == -1:
                end = int(time.time())
            else:
                end = jobs[j].end
            # we start from cached data, if nothing is cached, this is start
            start = jobs[j].cachets
            for e in self.perfcoll.find({"$and": [{"ts": {"$gt": start}}, {"ts": {"$lt": end}}, {"nid": {"$in": jobs[j].nodelist}}]}):
                node = e["nid"]
                if node == "aggr":
                    continue
                if 'mdt' in e:
                    jobs[j].miops += e['v']
                elif 'ost' in e:
                    jobs[j].wiops += e['v'][0]
                    jobs[j].wbw += e['v'][1]
                    jobs[j].riops += e['v'][2]
                    jobs[j].rbw += e['v'][3]
            fsjobs.add(j)
            # update cache, write cachets, between start and cachets, data
            # was already summed up
            # NOTE(review): Collection.update() is the legacy pymongo API;
            # pymongo >= 4 only offers update_one/update_many -- confirm the
            # deployed driver version before upgrading.
            self.jobcoll.update(
                {"jobid": j},
                {"$set": {
                    "cachets": end,
                    "miops": jobs[j].miops,
                    "wiops": jobs[j].wiops,
                    "wbw": jobs[j].wbw,
                    "riops": jobs[j].riops,
                    "rbw": jobs[j].rbw
                }})
        localjobs = {}
        for j in fsjobs:
            localjobs[j] = jobs[j]
        return localjobs

    def readFSMaxima(self):
        """Return the stored maxima for this filesystem, or all zeroes.

        Index meaning of the returned list:
        0: nodes
        1: metadata
        2: wrqs
        3: rrqs
        4: wbw
        5: rbw
        """
        self.maxcoll = self.perfdb["fsmaxima"]
        e = self.maxcoll.find_one({"fsname": self.fsname})
        if e is None:
            return [0, 0, 0, 0, 0, 0]
        else:
            return e["maxima"]

    def writeFSMaxima(self, maxima):
        """Upsert the maxima list for this filesystem (see readFSMaxima)."""
        r = self.maxcoll.update(
            {"fsname": self.fsname},
            {"$set": {
                "maxima": maxima
            }
            },
            upsert=True
        )
        # print r

    def getFSvalues(self, start, end):
        """Return {ts: counters} built from the "aggr" pseudo-node samples
        between *start* and *end* (both exclusive)."""
        timelist = {}
        for e in self.perfcoll.find({"$and": [{"ts": {"$gt": start}}, {"ts": {"$lt": end}}, {"nid": "aggr"}]}):
            ts = e["ts"]
            if ts not in timelist:
                timelist[ts] = {}
                timelist[ts]["miops"] = 0
                timelist[ts]["wiops"] = 0
                timelist[ts]["wbw"] = 0
                timelist[ts]["riops"] = 0
                timelist[ts]["rbw"] = 0
            if 'mdt' in e:
                timelist[ts]["miops"] += e['v']
            elif 'ost' in e:
                timelist[ts]["wiops"] += e['v'][0]
                timelist[ts]["wbw"] += e['v'][1]
                timelist[ts]["riops"] += e['v'][2]
                timelist[ts]["rbw"] += e['v'][3]
        return timelist
# print TOP like list of jobs, with current rates
def printTopjobs(fsname, key):
    # Print a top(1)-like table of the jobs currently doing IO on *fsname*,
    # sorted descending by *key* ("meta", "iops" or "bw").
    # Counters are divided by the sample interval dt, so the table shows rates.
    fs = filesystem(DBHOST, fsname)
    (timestamp, nodes) = fs.currentNodesstats()
    print time.ctime(timestamp), "\n"
    jobs = fs.mapNodesToJobs(timestamp, nodes)
    if key == "meta":
        sortf = lambda x: x.miops
    elif key == "iops":
        sortf = lambda x: x.wiops + x.riops
    elif key == "bw":
        sortf = lambda x: x.rbw + x.wbw
    else:
        print "use meta, iops or bw as sorting key"
        sys.exit()
    print "JOBID OWNER NODES META WRITE WrBW READ ReBW"
    print " IOPS IOPS MB/s IOPS MB/s"
    print "=================================================================="
    for j in sorted(jobs.values(), key=sortf, reverse=True):
        dt = float(j.dt)
        print "%-10s %-8s %-5s %6d %6d %9.2f %6d %9.2f" % (j.jobid.split(".")[0], j.owner, len(j.nodelist), j.miops/dt, j.wiops/dt, (j.wbw/dt)/1000000.0, j.riops/dt, (j.rbw/dt)/1000000.0)
# print TOP like list of jobs, with absolute values over runtime (sum over time)
def printJobSummary(fsname, key):
    # Print absolute totals accumulated over each running job's lifetime
    # (sum over time, not rates), sorted descending by *key*.
    fs = filesystem(DBHOST, fsname)
    jobs = fs.getRunningJobs()
    jobs = fs.accumulateJobStats(jobs)
    if key == "meta":
        sortf = lambda x: x.miops
    elif key == "iops":
        sortf = lambda x: x.wiops + x.riops
    elif key == "bw":
        sortf = lambda x: x.rbw + x.wbw
    else:
        print "use meta, iops or bw as sorting key"
        sys.exit()
    print "JOBID OWNER NODES TIME META WRITE WrBW READ ReBW"
    print " [H] KIOPS KIOPS [GB] KIOPS [GB]"
    print "======================================================================="
    for j in sorted(jobs.values(), key=sortf, reverse=True):
        print "%-10s %-8s %-5s %4.1f %6d %6d %9.2f %6d %9.2f" % (j.jobid.split(".")[0], j.owner, len(j.nodelist), (time.time()-j.start)/3600, j.miops/1000, j.wiops/1000, j.wbw/1000000000.0, j.riops/1000, j.rbw/1000000000.0)
if __name__ == '__main__':
    # CLI entry point: top.py [sum|top] fsname [meta|iops|bw]
    if len(sys.argv) < 4:
        print "usage: top.py [sum|top] fsname [meta|iops|bw]"
        print " sum: show aggregated values over runtime of active jobs"
        print " top: show current values of active jobs"
        print
        print " meta: sort for metadata operation"
        print " iops: sort for iops"
        print " bw: sort for bandwidth"
        sys.exit(0)
    if sys.argv[1] == "top":
        printTopjobs(sys.argv[2], sys.argv[3])
    if sys.argv[1] == "sum":
        printJobSummary(sys.argv[2], sys.argv[3])
| gpl-2.0 |
wagnerand/addons-server | src/olympia/amo/pagination.py | 4 | 3755 | from math import ceil
from django.conf import settings
from django.core.paginator import (
EmptyPage,
InvalidPage,
Page,
PageNotAnInteger,
Paginator,
)
from django.utils.functional import cached_property
class ESPaginator(Paginator):
    """
    A better paginator for search results
    The normal Paginator does a .count() query and then a slice. Since ES
    results contain the total number of results, we can take an optimistic
    slice and then adjust the count.
    :param use_elasticsearch_dsl:
        Used to activate support for our elasticsearch-dsl based pagination
        implementation. elasticsearch-dsl is being used in the v3+ API while
        we have our own wrapper implementation in :mod:`olympia.amo.search`.
    """
    # Maximum result position. Should match 'index.max_result_window' ES
    # setting if present. ES defaults to 10000 but we'd like more to make sure
    # all our extensions can be found if searching without a query and
    # paginating through all results.
    max_result_window = settings.ES_MAX_RESULT_WINDOW

    def __init__(self, *args, **kwargs):
        # Pop our custom keyword before delegating to the stock Paginator,
        # which does not know about it.
        self.use_elasticsearch_dsl = kwargs.pop('use_elasticsearch_dsl', True)
        Paginator.__init__(self, *args, **kwargs)

    @cached_property
    def num_pages(self):
        """
        Returns the total number of pages.
        """
        if self.count == 0 and not self.allow_empty_first_page:
            return 0
        # Make sure we never return a page beyond max_result_window
        hits = min(self.max_result_window, max(1, self.count - self.orphans))
        return int(ceil(hits / float(self.per_page)))

    def validate_number(self, number):
        """
        Validates the given 1-based page number.
        This class overrides the default behavior and ignores the upper bound.
        """
        # The upper bound cannot be enforced here because self.count is only
        # known after the ES query has run (see page() below).
        try:
            number = int(number)
        except (TypeError, ValueError):
            raise PageNotAnInteger('That page number is not an integer')
        if number < 1:
            raise EmptyPage('That page number is less than 1')
        return number

    def page(self, number):
        """
        Returns a page object.
        This class overrides the default behavior and ignores "orphans" and
        assigns the count from the ES result to the Paginator.
        """
        number = self.validate_number(number)
        bottom = (number - 1) * self.per_page
        top = bottom + self.per_page
        if top > self.max_result_window:
            raise InvalidPage('That page number is too high for the current page size')
        # Force the search to evaluate and then attach the count. We want to
        # avoid an extra useless query even if there are no results, so we
        # directly fetch the count from hits.
        if self.use_elasticsearch_dsl:
            result = self.object_list[bottom:top].execute()
            # Overwrite `object_list` with the list of ES results.
            page = Page(result.hits, number, self)
            # Overwrite the `count` with the total received from ES results.
            self.count = int(page.object_list.total)
        else:
            page = Page(self.object_list[bottom:top], number, self)
            # Force the search to evaluate and then attach the count.
            list(page.object_list)
            self.count = int(page.object_list.count())
        # Now that we have the count validate that the page number isn't higher
        # than the possible number of pages and adjust accordingly.
        if number > self.num_pages:
            if number == 1 and self.allow_empty_first_page:
                pass
            else:
                raise EmptyPage('That page contains no results')
        return page
| bsd-3-clause |
jylaxp/django | tests/admin_utils/models.py | 217 | 1821 | from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Site(models.Model):
    # Minimal site model (domain only) used as a ForeignKey target below.
    domain = models.CharField(max_length=100)

    def __str__(self):
        return self.domain
class Article(models.Model):
    """
    A simple Article model for testing
    """
    site = models.ForeignKey(Site, models.CASCADE, related_name="admin_articles")
    title = models.CharField(max_length=100)
    title2 = models.CharField(max_length=100, verbose_name="another name")
    created = models.DateTimeField()

    def test_from_model(self):
        # Model method exposed as a computed admin column (no override).
        return "nothing"

    def test_from_model_with_override(self):
        # Same as above, but with an explicit column header set below.
        return "nothing"
    test_from_model_with_override.short_description = "not What you Expect"
@python_2_unicode_compatible
class Count(models.Model):
    # Self-referencing model: exercises FK-to-'self' handling.
    num = models.PositiveSmallIntegerField()
    parent = models.ForeignKey('self', models.CASCADE, null=True)

    def __str__(self):
        return six.text_type(self.num)
class Event(models.Model):
    # Timestamp is filled in automatically on creation.
    date = models.DateTimeField(auto_now_add=True)
class Location(models.Model):
    # One-to-one relation with a custom verbose_name on the field.
    event = models.OneToOneField(Event, models.CASCADE, verbose_name='awesome event')
class Guest(models.Model):
    event = models.OneToOneField(Event, models.CASCADE)
    name = models.CharField(max_length=255)

    class Meta:
        # verbose_name on the model itself (contrast Location's field-level one).
        verbose_name = "awesome guest"
class EventGuide(models.Model):
    # ForeignKey with on_delete=DO_NOTHING (no cascading delete behaviour).
    event = models.ForeignKey(Event, models.DO_NOTHING)
class Vehicle(models.Model):
    # Concrete base model for the parent_link inheritance test below.
    pass
class VehicleMixin(Vehicle):
    # Abstract mixin carrying an explicit parent_link OneToOneField to
    # Vehicle; related_name is expanded per concrete subclass.
    vehicle = models.OneToOneField(
        Vehicle,
        models.CASCADE,
        parent_link=True,
        related_name='vehicle_%(app_label)s_%(class)s',
    )

    class Meta:
        abstract = True
class Car(VehicleMixin):
    # Concrete subclass of the abstract mixin (multi-table inheritance).
    pass
| bsd-3-clause |
ndebuhr/thermo-state-solver | thermo-env/lib/python3.5/site-packages/setuptools/monkey.py | 80 | 5255 | """
Monkey patching of distutils.
"""
import sys
import distutils.filelist
import platform
import types
import functools
import inspect
from .py26compat import import_module
import six
import setuptools
__all__ = []
"""
Everything is private. Contact the project team
if you think you need this functionality.
"""
def get_unpatched(item):
    """Return the pre-patch original for *item*, or None for other objects."""
    if isinstance(item, six.class_types):
        return get_unpatched_class(item)
    if isinstance(item, types.FunctionType):
        return get_unpatched_function(item)
    return None
def get_unpatched_class(cls):
    """Protect against re-patching the distutils if reloaded
    Also ensures that no other distutils extension monkeypatched the distutils
    first.
    """
    # Walk the MRO and take the first base that does not come from
    # setuptools; it must originate from distutils, otherwise somebody
    # else patched the distutils before us.
    for base in inspect.getmro(cls):
        if base.__module__.startswith('setuptools'):
            continue
        if not base.__module__.startswith('distutils'):
            msg = "distutils has already been patched by %r" % cls
            raise AssertionError(msg)
        return base
def patch_all():
    """Install every setuptools replacement into the distutils modules."""
    # we can't patch distutils.cmd, alas
    distutils.core.Command = setuptools.Command
    has_issue_12885 = sys.version_info <= (3, 5, 3)
    if has_issue_12885:
        # fix findall bug in distutils (http://bugs.python.org/issue12885)
        distutils.filelist.findall = setuptools.findall
    # Interpreters older than the listed patch releases still default to the
    # retired PyPI upload endpoint; redirect them to the warehouse URL.
    needs_warehouse = (
        sys.version_info < (2, 7, 13)
        or
        (3, 0) < sys.version_info < (3, 3, 7)
        or
        (3, 4) < sys.version_info < (3, 4, 6)
        or
        (3, 5) < sys.version_info <= (3, 5, 3)
    )
    if needs_warehouse:
        warehouse = 'https://upload.pypi.org/legacy/'
        distutils.config.PyPIRCCommand.DEFAULT_REPOSITORY = warehouse
    _patch_distribution_metadata_write_pkg_file()
    _patch_distribution_metadata_write_pkg_info()
    # Install Distribution throughout the distutils
    for module in distutils.dist, distutils.core, distutils.cmd:
        module.Distribution = setuptools.dist.Distribution
    # Install the patched Extension
    distutils.core.Extension = setuptools.extension.Extension
    distutils.extension.Extension = setuptools.extension.Extension
    if 'distutils.command.build_ext' in sys.modules:
        # build_ext was imported before we had a chance to patch it, so fix
        # up its already-bound Extension name as well.
        sys.modules['distutils.command.build_ext'].Extension = (
            setuptools.extension.Extension
        )
    patch_for_msvc_specialized_compiler()
def _patch_distribution_metadata_write_pkg_file():
    """Patch write_pkg_file to also write Requires-Python/Requires-External"""
    # The setuptools implementation knows about the newer metadata fields
    # that the stock distutils one silently drops.
    distutils.dist.DistributionMetadata.write_pkg_file = (
        setuptools.dist.write_pkg_file
    )
def _patch_distribution_metadata_write_pkg_info():
"""
Workaround issue #197 - Python 3 prior to 3.2.2 uses an environment-local
encoding to save the pkg_info. Monkey-patch its write_pkg_info method to
correct this undesirable behavior.
"""
environment_local = (3,) <= sys.version_info[:3] < (3, 2, 2)
if not environment_local:
return
distutils.dist.DistributionMetadata.write_pkg_info = (
setuptools.dist.write_pkg_info
)
def patch_func(replacement, target_mod, func_name):
    """Install *replacement* as target_mod.func_name.

    The original is resolved by name at call time so that an
    already-patched function is never recorded as the 'unpatched' one.
    """
    prior = getattr(target_mod, func_name)
    # Stash the original on the replacement (only on the first patch).
    replacement.__dict__.setdefault('unpatched', prior)
    # Swap the function in the target module.
    setattr(target_mod, func_name, replacement)
def get_unpatched_function(candidate):
    """Return the original function stashed on *candidate* by patch_func."""
    return candidate.unpatched
def patch_for_msvc_specialized_compiler():
    """
    Patch functions in distutils to use standalone Microsoft Visual C++
    compilers.
    """
    # import late to avoid circular imports on Python < 3.5
    msvc = import_module('setuptools.msvc')
    if platform.system() != 'Windows':
        # Compilers only available on Microsoft Windows
        return

    def patch_params(mod_name, func_name):
        """
        Prepare the parameters for patch_func to patch indicated function.
        """
        repl_prefix = 'msvc9_' if 'msvc9' in mod_name else 'msvc14_'
        repl_name = repl_prefix + func_name.lstrip('_')
        repl = getattr(msvc, repl_name)
        mod = import_module(mod_name)
        if not hasattr(mod, func_name):
            raise ImportError(func_name)
        return repl, mod, func_name

    # Python 2.7 to 3.4
    msvc9 = functools.partial(patch_params, 'distutils.msvc9compiler')
    # Python 3.5+
    msvc14 = functools.partial(patch_params, 'distutils._msvccompiler')
    try:
        # Patch distutils.msvc9compiler
        patch_func(*msvc9('find_vcvarsall'))
        patch_func(*msvc9('query_vcvarsall'))
    except ImportError:
        # That distutils flavour is absent on this interpreter; the
        # remaining patches are attempted independently below.
        pass
    try:
        # Patch distutils._msvccompiler._get_vc_env
        patch_func(*msvc14('_get_vc_env'))
    except ImportError:
        pass
    try:
        # Patch distutils._msvccompiler.gen_lib_options for Numpy
        patch_func(*msvc14('gen_lib_options'))
    except ImportError:
        pass
| mit |
smarkwell/asuswrt-merlin | release/src/router/libxml2/python/tests/reader.py | 87 | 12441 | #!/usr/bin/python -u
# -*- coding: ISO-8859-1 -*-
#
# this tests the basic APIs of the XmlTextReader interface
#
import libxml2
import StringIO
import sys
# Memory debug specific
libxml2.debugMemory(1)
f = StringIO.StringIO("""<a><b b1="b1"/><c>content of c</c></a>""")
input = libxml2.inputBuffer(f)
reader = input.newTextReader("test1")
ret = reader.Read()
if ret != 1:
print "test1: Error reading to first element"
sys.exit(1)
if reader.Name() != "a" or reader.IsEmptyElement() != 0 or \
reader.NodeType() != 1 or reader.HasAttributes() != 0:
print "test1: Error reading the first element"
sys.exit(1)
ret = reader.Read()
if ret != 1:
print "test1: Error reading to second element"
sys.exit(1)
if reader.Name() != "b" or reader.IsEmptyElement() != 1 or \
reader.NodeType() != 1 or reader.HasAttributes() != 1:
print "test1: Error reading the second element"
sys.exit(1)
ret = reader.Read()
if ret != 1:
print "test1: Error reading to third element"
sys.exit(1)
if reader.Name() != "c" or reader.IsEmptyElement() != 0 or \
reader.NodeType() != 1 or reader.HasAttributes() != 0:
print "test1: Error reading the third element"
sys.exit(1)
ret = reader.Read()
if ret != 1:
print "test1: Error reading to text node"
sys.exit(1)
if reader.Name() != "#text" or reader.IsEmptyElement() != 0 or \
reader.NodeType() != 3 or reader.HasAttributes() != 0 or \
reader.Value() != "content of c":
print "test1: Error reading the text node"
sys.exit(1)
ret = reader.Read()
if ret != 1:
print "test1: Error reading to end of third element"
sys.exit(1)
if reader.Name() != "c" or reader.IsEmptyElement() != 0 or \
reader.NodeType() != 15 or reader.HasAttributes() != 0:
print "test1: Error reading the end of third element"
sys.exit(1)
ret = reader.Read()
if ret != 1:
print "test1: Error reading to end of first element"
sys.exit(1)
if reader.Name() != "a" or reader.IsEmptyElement() != 0 or \
reader.NodeType() != 15 or reader.HasAttributes() != 0:
print "test1: Error reading the end of first element"
sys.exit(1)
ret = reader.Read()
if ret != 0:
print "test1: Error reading to end of document"
sys.exit(1)
#
# example from the XmlTextReader docs
#
f = StringIO.StringIO("""<test xmlns:dt="urn:datatypes" dt:type="int"/>""")
input = libxml2.inputBuffer(f)
reader = input.newTextReader("test2")
ret = reader.Read()
if ret != 1:
print "Error reading test element"
sys.exit(1)
if reader.GetAttributeNo(0) != "urn:datatypes" or \
reader.GetAttributeNo(1) != "int" or \
reader.GetAttributeNs("type", "urn:datatypes") != "int" or \
reader.GetAttribute("dt:type") != "int":
print "error reading test attributes"
sys.exit(1)
#
# example from the XmlTextReader docs
#
f = StringIO.StringIO("""<root xmlns:a="urn:456">
<item>
<ref href="a:b"/>
</item>
</root>""")
input = libxml2.inputBuffer(f)
reader = input.newTextReader("test3")
ret = reader.Read()
while ret == 1:
if reader.Name() == "ref":
if reader.LookupNamespace("a") != "urn:456":
print "error resolving namespace prefix"
sys.exit(1)
break
ret = reader.Read()
if ret != 1:
print "Error finding the ref element"
sys.exit(1)
#
# Home made example for the various attribute access functions
#
f = StringIO.StringIO("""<testattr xmlns="urn:1" xmlns:a="urn:2" b="b" a:b="a:b"/>""")
input = libxml2.inputBuffer(f)
reader = input.newTextReader("test4")
ret = reader.Read()
if ret != 1:
print "Error reading the testattr element"
sys.exit(1)
#
# Attribute exploration by index
#
if reader.MoveToAttributeNo(0) != 1:
print "Failed moveToAttribute(0)"
sys.exit(1)
if reader.Value() != "urn:1":
print "Failed to read attribute(0)"
sys.exit(1)
if reader.Name() != "xmlns":
print "Failed to read attribute(0) name"
sys.exit(1)
if reader.MoveToAttributeNo(1) != 1:
print "Failed moveToAttribute(1)"
sys.exit(1)
if reader.Value() != "urn:2":
print "Failed to read attribute(1)"
sys.exit(1)
if reader.Name() != "xmlns:a":
print "Failed to read attribute(1) name"
sys.exit(1)
if reader.MoveToAttributeNo(2) != 1:
print "Failed moveToAttribute(2)"
sys.exit(1)
if reader.Value() != "b":
print "Failed to read attribute(2)"
sys.exit(1)
if reader.Name() != "b":
print "Failed to read attribute(2) name"
sys.exit(1)
if reader.MoveToAttributeNo(3) != 1:
print "Failed moveToAttribute(3)"
sys.exit(1)
if reader.Value() != "a:b":
print "Failed to read attribute(3)"
sys.exit(1)
if reader.Name() != "a:b":
print "Failed to read attribute(3) name"
sys.exit(1)
#
# Attribute exploration by name
#
if reader.MoveToAttribute("xmlns") != 1:
print "Failed moveToAttribute('xmlns')"
sys.exit(1)
if reader.Value() != "urn:1":
print "Failed to read attribute('xmlns')"
sys.exit(1)
if reader.MoveToAttribute("xmlns:a") != 1:
print "Failed moveToAttribute('xmlns')"
sys.exit(1)
if reader.Value() != "urn:2":
print "Failed to read attribute('xmlns:a')"
sys.exit(1)
if reader.MoveToAttribute("b") != 1:
print "Failed moveToAttribute('b')"
sys.exit(1)
if reader.Value() != "b":
print "Failed to read attribute('b')"
sys.exit(1)
if reader.MoveToAttribute("a:b") != 1:
print "Failed moveToAttribute('a:b')"
sys.exit(1)
if reader.Value() != "a:b":
print "Failed to read attribute('a:b')"
sys.exit(1)
if reader.MoveToAttributeNs("b", "urn:2") != 1:
print "Failed moveToAttribute('b', 'urn:2')"
sys.exit(1)
if reader.Value() != "a:b":
print "Failed to read attribute('b', 'urn:2')"
sys.exit(1)
#
# Go back and read in sequence
#
if reader.MoveToElement() != 1:
print "Failed to move back to element"
sys.exit(1)
if reader.MoveToFirstAttribute() != 1:
print "Failed to move to first attribute"
sys.exit(1)
if reader.Value() != "urn:1":
print "Failed to read attribute(0)"
sys.exit(1)
if reader.Name() != "xmlns":
print "Failed to read attribute(0) name"
sys.exit(1)
if reader.MoveToNextAttribute() != 1:
print "Failed to move to next attribute"
sys.exit(1)
if reader.Value() != "urn:2":
print "Failed to read attribute(1)"
sys.exit(1)
if reader.Name() != "xmlns:a":
print "Failed to read attribute(1) name"
sys.exit(1)
if reader.MoveToNextAttribute() != 1:
print "Failed to move to next attribute"
sys.exit(1)
if reader.Value() != "b":
print "Failed to read attribute(2)"
sys.exit(1)
if reader.Name() != "b":
print "Failed to read attribute(2) name"
sys.exit(1)
if reader.MoveToNextAttribute() != 1:
print "Failed to move to next attribute"
sys.exit(1)
if reader.Value() != "a:b":
print "Failed to read attribute(3)"
sys.exit(1)
if reader.Name() != "a:b":
print "Failed to read attribute(3) name"
sys.exit(1)
if reader.MoveToNextAttribute() != 0:
print "Failed to detect last attribute"
sys.exit(1)
#
# a couple of tests for namespace nodes
#
f = StringIO.StringIO("""<a xmlns="http://example.com/foo"/>""")
input = libxml2.inputBuffer(f)
reader = input.newTextReader("test6")
ret = reader.Read()
if ret != 1:
print "test6: failed to Read()"
sys.exit(1)
ret = reader.MoveToFirstAttribute()
if ret != 1:
print "test6: failed to MoveToFirstAttribute()"
sys.exit(1)
if reader.NamespaceUri() != "http://www.w3.org/2000/xmlns/" or \
reader.LocalName() != "xmlns" or reader.Name() != "xmlns" or \
reader.Value() != "http://example.com/foo" or reader.NodeType() != 2:
print "test6: failed to read the namespace node"
sys.exit(1)
f = StringIO.StringIO("""<a xmlns:prefix="http://example.com/foo"/>""")
input = libxml2.inputBuffer(f)
reader = input.newTextReader("test7")
ret = reader.Read()
if ret != 1:
print "test7: failed to Read()"
sys.exit(1)
ret = reader.MoveToFirstAttribute()
if ret != 1:
print "test7: failed to MoveToFirstAttribute()"
sys.exit(1)
if reader.NamespaceUri() != "http://www.w3.org/2000/xmlns/" or \
reader.LocalName() != "prefix" or reader.Name() != "xmlns:prefix" or \
reader.Value() != "http://example.com/foo" or reader.NodeType() != 2:
print "test7: failed to read the namespace node"
sys.exit(1)
#
# Test for a limit case:
#
f = StringIO.StringIO("""<a/>""")
input = libxml2.inputBuffer(f)
reader = input.newTextReader("test8")
ret = reader.Read()
if ret != 1:
print "test8: failed to read the node"
sys.exit(1)
if reader.Name() != "a" or reader.IsEmptyElement() != 1:
print "test8: failed to analyze the node"
sys.exit(1)
ret = reader.Read()
if ret != 0:
print "test8: failed to detect the EOF"
sys.exit(1)
#
# Another test provided by Stéphane Bidoul and checked with C#
#
def tst_reader(s):
    """Parse *s* with an xmlTextReader and return a textual node trace.

    One line per node: type, name, value, empty-element flag, depth;
    attribute nodes of elements are listed with a '--' prefix.
    """
    f = StringIO.StringIO(s)
    input = libxml2.inputBuffer(f)
    reader = input.newTextReader("tst")
    trace = []
    while reader.Read():
        trace.append("%s (%s) [%s] %d %d\n" % (reader.NodeType(),
                     reader.Name(), reader.Value(),
                     reader.IsEmptyElement(), reader.Depth()))
        if reader.NodeType() == 1:  # Element
            while reader.MoveToNextAttribute():
                trace.append("-- %s (%s) [%s] %d %d\n" % (reader.NodeType(),
                             reader.Name(), reader.Value(),
                             reader.IsEmptyElement(), reader.Depth()))
    return "".join(trace)
doc="""<a><b b1="b1"/><c>content of c</c></a>"""
expect="""1 (a) [None] 0 0
1 (b) [None] 1 1
-- 2 (b1) [b1] 0 2
1 (c) [None] 0 1
3 (#text) [content of c] 0 2
15 (c) [None] 0 1
15 (a) [None] 0 0
"""
res = tst_reader(doc)
if res != expect:
print "test5 failed"
print res
sys.exit(1)
doc="""<test><b/><c/></test>"""
expect="""1 (test) [None] 0 0
1 (b) [None] 1 1
1 (c) [None] 1 1
15 (test) [None] 0 0
"""
res = tst_reader(doc)
if res != expect:
print "test9 failed"
print res
sys.exit(1)
doc="""<a><b>bbb</b><c>ccc</c></a>"""
expect="""1 (a) [None] 0 0
1 (b) [None] 0 1
3 (#text) [bbb] 0 2
15 (b) [None] 0 1
1 (c) [None] 0 1
3 (#text) [ccc] 0 2
15 (c) [None] 0 1
15 (a) [None] 0 0
"""
res = tst_reader(doc)
if res != expect:
print "test10 failed"
print res
sys.exit(1)
doc="""<test a="a"/>"""
expect="""1 (test) [None] 1 0
-- 2 (a) [a] 0 1
"""
res = tst_reader(doc)
if res != expect:
print "test11 failed"
print res
sys.exit(1)
doc="""<test><a>aaa</a><b/></test>"""
expect="""1 (test) [None] 0 0
1 (a) [None] 0 1
3 (#text) [aaa] 0 2
15 (a) [None] 0 1
1 (b) [None] 1 1
15 (test) [None] 0 0
"""
res = tst_reader(doc)
if res != expect:
print "test12 failed"
print res
sys.exit(1)
doc="""<test><p></p></test>"""
expect="""1 (test) [None] 0 0
1 (p) [None] 0 1
15 (p) [None] 0 1
15 (test) [None] 0 0
"""
res = tst_reader(doc)
if res != expect:
print "test13 failed"
print res
sys.exit(1)
doc="""<p></p>"""
expect="""1 (p) [None] 0 0
15 (p) [None] 0 0
"""
res = tst_reader(doc)
if res != expect:
print "test14 failed"
print res
sys.exit(1)
#
# test from bug #108801
#
doc="""<?xml version="1.0" standalone="no"?>
<!DOCTYPE article PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" [
]>
<article>
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
</article>
"""
expect="""10 (article) [None] 0 0
1 (article) [None] 0 0
3 (#text) [
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
] 0 1
15 (article) [None] 0 0
"""
res = tst_reader(doc)
if res != expect:
print "test15 failed"
print res
sys.exit(1)
#
# cleanup for memory allocation counting
#
del f
del input
del reader
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
    print "OK"
else:
    # report the number of bytes still tracked by the libxml2 allocator
    print "Memory leak %d bytes" % (libxml2.debugMemory(1))
    libxml2.dumpMemory()
| gpl-2.0 |
garnachod/classification | src/Clasificadores/NodosPalabras/Nodo.py | 1 | 1286 | import random
class Nodo(object):
    """A word node in a word graph; edges to neighbours carry mutable weights."""

    def __init__(self, palabra):
        super(Nodo, self).__init__()
        # Maps neighbour word -> edge weight.
        self.nodosConectados = {}
        self.palabra = palabra

    def setConexo(self, nodo):
        """Create a symmetric edge between self and *nodo*, weight 0.1 each way."""
        self.nodosConectados[nodo.getPalabra()] = 0.1
        nodo.nodosConectados[self.getPalabra()] = 0.1

    def isConexo(self, palabra):
        """Return True when an edge to *palabra* exists."""
        return palabra in self.nodosConectados

    def getPalabra(self):
        """Return the word this node represents."""
        return self.palabra

    def getPeso(self, palabra):
        """Return the weight of the edge to *palabra* (KeyError if absent)."""
        return self.nodosConectados[palabra]

    def sumaAlPeso(self, palabra, cantidad):
        """Add *cantidad* to the weight of the edge to *palabra*."""
        self.nodosConectados[palabra] += cantidad

    def randomizaTodosPesos(self):
        """Jitter every edge weight by a uniform value in [-0.05, 0.05)."""
        for vecino in self.nodosConectados:
            self.nodosConectados[vecino] += (random.random() - 0.5) * 0.1

    def randomizaProporPesos(self, probabilidad, alpha):
        """Jitter each edge weight with probability *probabilidad*, scaled by *alpha*."""
        for vecino in self.nodosConectados:
            if random.random() <= probabilidad:
                self.nodosConectados[vecino] += (random.random() - 0.5) * alpha

    def duplica(self):
        """Return a copy of this node with the edge weights copied as floats."""
        copia = Nodo(self.palabra)
        for vecino in self.nodosConectados:
            copia.nodosConectados[vecino] = float(self.nodosConectados[vecino])
        return copia
| apache-2.0 |
zahodi/ansible | lib/ansible/modules/system/lvg.py | 25 | 9874 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Alexander Bulimov <lazywolf0@gmail.com>
# based on lvol module by Jeroen Hoekx <jeroen.hoekx@dsquare.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
author: "Alexander Bulimov (@abulimov)"
module: lvg
short_description: Configure LVM volume groups
description:
- This module creates, removes or resizes volume groups.
version_added: "1.1"
options:
vg:
description:
- The name of the volume group.
required: true
pvs:
description:
- List of comma-separated devices to use as physical devices in this volume group. Required when creating or resizing volume group.
- The module will take care of running pvcreate if needed.
required: false
pesize:
description:
- The size of the physical extent in megabytes. Must be a power of 2.
default: 4
required: false
vg_options:
description:
- Additional options to pass to C(vgcreate) when creating the volume group.
default: null
required: false
version_added: "1.6"
state:
choices: [ "present", "absent" ]
default: present
description:
- Control if the volume group exists.
required: false
force:
choices: [ "yes", "no" ]
default: "no"
description:
- If yes, allows to remove volume group with logical volumes.
required: false
notes:
- module does not modify PE size for already present volume group
'''
EXAMPLES = '''
# Create a volume group on top of /dev/sda1 with physical extent size = 32MB.
- lvg:
vg: vg.services
pvs: /dev/sda1
pesize: 32
# Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5.
# If, for example, we already have VG vg.services on top of /dev/sdb1,
# this VG will be extended by /dev/sdc5. Or if vg.services was created on
# top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5,
# and then reduce by /dev/sda5.
- lvg:
vg: vg.services
pvs: /dev/sdb1,/dev/sdc5
# Remove a volume group with name vg.services.
- lvg:
vg: vg.services
state: absent
'''
def parse_vgs(data):
    """Parse ``vgs --separator ';'`` output into a list of dicts.

    Each dict carries the group name plus its physical- and
    logical-volume counts.
    """
    return [
        {
            'name': fields[0],
            'pv_count': int(fields[1]),
            'lv_count': int(fields[2]),
        }
        for fields in (line.strip().split(';') for line in data.splitlines())
    ]
def find_mapper_device_name(module, dm_device):
    """Translate a ``/dev/dm-N`` device into its ``/dev/mapper/<name>`` alias.

    Fails the module run if ``dmsetup`` cannot report a name for the device.
    """
    dmsetup = module.get_bin_path('dmsetup', True)
    rc, dm_name, err = module.run_command("%s info -C --noheadings -o name %s" % (dmsetup, dm_device))
    if rc != 0:
        module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err)
    return '/dev/mapper/' + dm_name.rstrip()
def parse_pvs(module, data):
    """Parse ``pvs --separator ';'`` output into dicts with name/vg_name.

    ``/dev/dm-N`` entries are translated to their ``/dev/mapper`` aliases so
    they compare equal to user-supplied device paths.
    """
    physical_volumes = []
    for line in data.splitlines():
        fields = line.strip().split(';')
        device = fields[0]
        if device.startswith('/dev/dm-'):
            device = find_mapper_device_name(module, device)
        physical_volumes.append({'name': device, 'vg_name': fields[1]})
    return physical_volumes
def main():
    """Entry point: create, resize or remove an LVM volume group.

    Reads the module parameters, inspects the current PV/VG state via the
    LVM command-line tools, and converges the named volume group to the
    requested state, honouring check mode throughout.
    """
    module = AnsibleModule(
        argument_spec = dict(
            vg=dict(required=True),
            pvs=dict(type='list'),
            pesize=dict(type='int', default=4),
            vg_options=dict(default=''),
            state=dict(choices=["absent", "present"], default='present'),
            force=dict(type='bool', default='no'),
        ),
        supports_check_mode=True,
    )

    vg = module.params['vg']
    state = module.params['state']
    force = module.boolean(module.params['force'])
    pesize = module.params['pesize']
    vgoptions = module.params['vg_options'].split()

    # PVs are mandatory when the VG should exist; optional for removal.
    dev_list = []
    if module.params['pvs']:
        dev_list = module.params['pvs']
    elif state == 'present':
        module.fail_json(msg="No physical volumes given.")

    # LVM always uses real paths not symlinks so replace symlinks with actual path
    for idx, dev in enumerate(dev_list):
        dev_list[idx] = os.path.realpath(dev)

    if state=='present':
        ### check given devices
        for test_dev in dev_list:
            if not os.path.exists(test_dev):
                module.fail_json(msg="Device %s not found."%test_dev)

        ### get pv list
        pvs_cmd = module.get_bin_path('pvs', True)
        rc,current_pvs,err = module.run_command("%s --noheadings -o pv_name,vg_name --separator ';'" % pvs_cmd)
        if rc != 0:
            module.fail_json(msg="Failed executing pvs command.",rc=rc, err=err)

        ### check pv for devices
        # Refuse to steal a PV that already belongs to some other VG.
        pvs = parse_pvs(module, current_pvs)
        used_pvs = [ pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg ]
        if used_pvs:
            module.fail_json(msg="Device %s is already in %s volume group."%(used_pvs[0]['name'],used_pvs[0]['vg_name']))

    vgs_cmd = module.get_bin_path('vgs', True)
    rc,current_vgs,err = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd)

    if rc != 0:
        module.fail_json(msg="Failed executing vgs command.",rc=rc, err=err)

    changed = False

    vgs = parse_vgs(current_vgs)

    # Locate the requested VG among the existing ones (for-else: not found).
    for test_vg in vgs:
        if test_vg['name'] == vg:
            this_vg = test_vg
            break
    else:
        this_vg = None

    if this_vg is None:
        if state == 'present':
            ### create VG
            if module.check_mode:
                changed = True
            else:
                ### create PV
                pvcreate_cmd = module.get_bin_path('pvcreate', True)
                for current_dev in dev_list:
                    rc,_,err = module.run_command("%s -f %s" % (pvcreate_cmd,current_dev))
                    if rc == 0:
                        changed = True
                    else:
                        module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
                # required=True for consistency with the other get_bin_path
                # calls: fail cleanly if vgcreate is missing instead of
                # running a bogus "None ..." command.
                vgcreate_cmd = module.get_bin_path('vgcreate', True)
                rc,_,err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', str(pesize), vg] + dev_list)
                if rc == 0:
                    changed = True
                else:
                    module.fail_json(msg="Creating volume group '%s' failed"%vg, rc=rc, err=err)
    else:
        if state == 'absent':
            if module.check_mode:
                module.exit_json(changed=True)
            else:
                # Only remove a VG holding logical volumes when forced.
                if this_vg['lv_count'] == 0 or force:
                    ### remove VG
                    vgremove_cmd = module.get_bin_path('vgremove', True)
                    rc,_,err = module.run_command("%s --force %s" % (vgremove_cmd, vg))
                    if rc == 0:
                        module.exit_json(changed=True)
                    else:
                        module.fail_json(msg="Failed to remove volume group %s"%(vg),rc=rc, err=err)
                else:
                    module.fail_json(msg="Refuse to remove non-empty volume group %s without force=yes"%(vg))

        ### resize VG
        # Only reached for state=present (the absent branch always exits),
        # so `pvs` from the present-state check above is defined here.
        current_devs = [ os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg ]
        devs_to_remove = list(set(current_devs) - set(dev_list))
        devs_to_add = list(set(dev_list) - set(current_devs))

        if devs_to_add or devs_to_remove:
            if module.check_mode:
                changed = True
            else:
                if devs_to_add:
                    devs_to_add_string = ' '.join(devs_to_add)
                    ### create PV
                    pvcreate_cmd = module.get_bin_path('pvcreate', True)
                    for current_dev in devs_to_add:
                        rc,_,err = module.run_command("%s -f %s" % (pvcreate_cmd, current_dev))
                        if rc == 0:
                            changed = True
                        else:
                            module.fail_json(msg="Creating physical volume '%s' failed"%current_dev, rc=rc, err=err)
                    ### add PV to our VG
                    vgextend_cmd = module.get_bin_path('vgextend', True)
                    rc,_,err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string))
                    if rc == 0:
                        changed = True
                    else:
                        module.fail_json(msg="Unable to extend %s by %s."%(vg, devs_to_add_string),rc=rc,err=err)

                ### remove some PV from our VG
                if devs_to_remove:
                    devs_to_remove_string = ' '.join(devs_to_remove)
                    vgreduce_cmd = module.get_bin_path('vgreduce', True)
                    rc,_,err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string))
                    if rc == 0:
                        changed = True
                    else:
                        module.fail_json(msg="Unable to reduce %s by %s."%(vg, devs_to_remove_string),rc=rc,err=err)

    module.exit_json(changed=changed)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
hsaputra/tensorflow | tensorflow/compiler/tests/stateless_random_ops_test.py | 18 | 4817 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for stateless random-number generation ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.contrib import stateless
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class StatelessRandomOpsTest(XLATestCase):
  """Test cases for stateless random-number generator operators."""

  def _random_types(self):
    # Only float32 is exercised; extend this list to cover more dtypes.
    return [dtypes.float32]

  def testDeterminism(self):
    # Stateless values should be equal iff the seeds are equal (roughly)
    with self.test_session(), self.test_scope():
      seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
      # 25 distinct seeds, each repeated three times, so every seed has
      # equal partners (must match) and unequal partners (should differ).
      seeds = [(x, y) for x in range(5) for y in range(5)] * 3
      for stateless_op in [
          stateless.stateless_random_uniform, stateless.stateless_random_normal
      ]:
        for shape in (), (3,), (2, 5):
          for dtype in self._random_types():
            pure = stateless_op(shape, seed=seed_t, dtype=dtype)
            values = [(seed, pure.eval(feed_dict={
                seed_t: seed
            })) for seed in seeds]
            for s0, v0 in values:
              for s1, v1 in values:
                # Identical seeds must reproduce identical outputs; distinct
                # seeds are expected to differ in at least one element.
                self.assertEqual(s0 == s1, np.all(v0 == v1))

  def testRandomUniformIsInRange(self):
    with self.test_session() as sess, self.test_scope():
      for dtype in self._random_types():
        seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
        x = stateless.stateless_random_uniform(
            shape=[1000], seed=seed_t, dtype=dtype)
        y = sess.run(x, {seed_t: [0x12345678, 0xabcdef12]})
        # Uniform samples must lie in the half-open interval [0, 1).
        self.assertTrue(np.all(y >= 0))
        self.assertTrue(np.all(y < 1))

  def _chi_squared(self, x, bins):
    """Pearson's Chi-squared test.

    Returns the Chi^2 statistic of *x* against a uniform histogram over
    [0, 1) with *bins* equally likely buckets.
    """
    x = np.ravel(x)
    n = len(x)
    histogram, _ = np.histogram(x, bins=bins, range=(0, 1))
    expected = n / float(bins)
    return np.sum(np.square(histogram - expected) / expected)

  def testDistributionOfStatelessRandomUniform(self):
    """Use Pearson's Chi-squared test to test for uniformity."""
    with self.test_session() as sess, self.test_scope():
      for dtype in self._random_types():
        seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
        n = 1000
        x = stateless.stateless_random_uniform(
            shape=[n], seed=seed_t, dtype=dtype)
        y = sess.run(x, {seed_t: [565656, 121212]})
        # Tests that the values are distributed amongst 10 bins with equal
        # probability. 16.92 is the Chi^2 value for 9 degrees of freedom with
        # p=0.05. This test is probabilistic and would be flaky if the random
        # seed were not fixed.
        self.assertTrue(self._chi_squared(y, 10) < 16.92)

  def _normal_cdf(self, x):
    """Cumulative distribution function for a standard normal distribution."""
    return 0.5 + 0.5 * np.vectorize(math.erf)(x / math.sqrt(2))

  def _anderson_darling(self, x):
    """Anderson-Darling test for a standard normal distribution.

    Returns the A^2 statistic of the sample *x* against N(0, 1).
    """
    x = np.sort(np.ravel(x))
    n = len(x)
    i = np.linspace(1, n, n)
    z = np.sum((2 * i - 1) * np.log(self._normal_cdf(x)) +
               (2 * (n - i) + 1) * np.log(1 - self._normal_cdf(x)))
    return -n - z / n

  def testDistributionOfStatelessRandomNormal(self):
    """Use Anderson-Darling test to test distribution appears normal."""
    with self.test_session() as sess, self.test_scope():
      for dtype in self._random_types():
        seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
        n = 1000
        x = stateless.stateless_random_normal(
            shape=[n], seed=seed_t, dtype=dtype)
        y = sess.run(x, {seed_t: [25252, 314159]})
        # The constant 2.492 is the 5% critical value for the Anderson-Darling
        # test where the mean and variance are known. This test is probabilistic
        # so to avoid flakiness the seed is fixed.
        self.assertTrue(self._anderson_darling(y) < 2.492)
if __name__ == '__main__':
test.main()
| apache-2.0 |
carlohamalainen/nipype | examples/fmri_fsl_reuse.py | 14 | 9833 | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
=========================
fMRI: FSL reuse workflows
=========================
A workflow that uses fsl to perform a first level analysis on the nipype
tutorial data set::
python fmri_fsl_reuse.py
First tell python where to find the appropriate functions.
"""
import os # system functions
import nipype.interfaces.io as nio # Data i/o
import nipype.interfaces.fsl as fsl # fsl
import nipype.interfaces.utility as util # utility
import nipype.pipeline.engine as pe # pypeline engine
import nipype.algorithms.modelgen as model # model generation
import nipype.algorithms.rapidart as ra # artifact detection
from nipype.workflows.fmri.fsl import (create_featreg_preproc,
create_modelfit_workflow,
create_fixed_effects_flow)
"""
Preliminaries
-------------
Setup any package specific configuration. The output file format for FSL
routines is being set to compressed NIFTI.
"""
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
level1_workflow = pe.Workflow(name='level1flow')
preproc = create_featreg_preproc(whichvol='first')
modelfit = create_modelfit_workflow()
fixed_fx = create_fixed_effects_flow()
"""
Add artifact detection and model specification nodes between the preprocessing
and modelfitting workflows.
"""
art = pe.MapNode(interface=ra.ArtifactDetect(use_differences = [True, False],
use_norm = True,
norm_threshold = 1,
zintensity_threshold = 3,
parameter_source = 'FSL',
mask_type = 'file'),
iterfield=['realigned_files', 'realignment_parameters', 'mask_file'],
name="art")
modelspec = pe.Node(interface=model.SpecifyModel(), name="modelspec")
level1_workflow.connect([(preproc, art, [('outputspec.motion_parameters',
'realignment_parameters'),
('outputspec.realigned_files',
'realigned_files'),
('outputspec.mask', 'mask_file')]),
(preproc, modelspec, [('outputspec.highpassed_files',
'functional_runs'),
('outputspec.motion_parameters',
'realignment_parameters')]),
(art, modelspec, [('outlier_files', 'outlier_files')]),
(modelspec, modelfit, [('session_info', 'inputspec.session_info')]),
(preproc, modelfit, [('outputspec.highpassed_files', 'inputspec.functional_data')])
])
"""
Set up first-level workflow
---------------------------
"""
def sort_copes(files):
    """Transpose per-run lists of cope files into per-contrast lists.

    Given ``files[run][contrast]``, return ``out[contrast][run]``.
    """
    # Indexing files[0] (like the original) so an empty input still raises.
    n_contrasts = len(files[0])
    return [[run_files[i] for run_files in files] for i in range(n_contrasts)]
def num_copes(files):
    """Return how many cope entries are in the given list (one per run set)."""
    return len(files)

pickfirst = lambda x : x[0]
level1_workflow.connect([(preproc, fixed_fx, [(('outputspec.mask', pickfirst),
'flameo.mask_file')]),
(modelfit, fixed_fx, [(('outputspec.copes', sort_copes),
'inputspec.copes'),
('outputspec.dof_file',
'inputspec.dof_files'),
(('outputspec.varcopes',
sort_copes),
'inputspec.varcopes'),
(('outputspec.copes', num_copes),
'l2model.num_copes'),
])
])
"""
Experiment specific components
------------------------------
The nipype tutorial contains data for two subjects. Subject data
is in two subdirectories, ``s1`` and ``s2``. Each subject directory
contains four functional volumes: f3.nii, f5.nii, f7.nii, f10.nii. And
one anatomical volume named struct.nii.
Below we set some variables to inform the ``datasource`` about the
layout of our data. We specify the location of the data, the subject
sub-directories and a dictionary that maps each run to a mnemonic (or
field) for the run type (``struct`` or ``func``). These fields become
the output fields of the ``datasource`` node in the pipeline.
In the example below, run 'f3' is of type 'func' and gets mapped to a
nifti filename through a template '%s.nii'. So 'f3' would become
'f3.nii'.
"""
# Specify the location of the data.
data_dir = os.path.abspath('data')
# Specify the subject directories
subject_list = ['s1'] #, 's3']
# Map field names to individual subject runs.
info = dict(func=[['subject_id', ['f3','f5','f7','f10']]],
struct=[['subject_id','struct']])
infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']),
name="infosource")
"""Here we set up iteration over all the subjects. The following line
is a particular example of the flexibility of the system. The
``datasource`` attribute ``iterables`` tells the pipeline engine that
it should repeat the analysis on each of the items in the
``subject_list``. In the current example, the entire first level
preprocessing and estimation will be repeated for each subject
contained in subject_list.
"""
infosource.iterables = ('subject_id', subject_list)
"""
Now we create a :class:`nipype.interfaces.io.DataSource` object and
fill in the information from above about the layout of our data. The
:class:`nipype.pipeline.NodeWrapper` module wraps the interface object
and provides additional housekeeping and pipeline specific
functionality.
"""
datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],
outfields=['func', 'struct']),
name = 'datasource')
datasource.inputs.base_directory = data_dir
datasource.inputs.template = '%s/%s.nii'
datasource.inputs.template_args = info
datasource.inputs.sort_filelist = True
"""
Use the get_node function to retrieve an internal node by name. Then set the
iterables on this node to perform two different extents of smoothing.
"""
inputnode = level1_workflow.get_node('featpreproc.inputspec')
inputnode.iterables = ('fwhm', [5.,10.])
hpcutoff = 120.
TR = 3.
inputnode.inputs.highpass = hpcutoff/(2*TR)
"""
Setup a function that returns subject-specific information about the
experimental paradigm. This is used by the
:class:`nipype.modelgen.SpecifyModel` to create the information necessary
to generate an SPM design matrix. In this tutorial, the same paradigm was used
for every participant. Other examples of this function are available in the
`doc/examples` folder. Note: Python knowledge required here.
"""
def subjectinfo(subject_id):
    """Return per-run paradigm information for ``subject_id``.

    Every one of the four runs uses the same two conditions (Task-Odd /
    Task-Even), with onsets at 15+60k and 45+60k seconds respectively and
    15 s block durations.
    """
    from nipype.interfaces.base import Bunch
    from copy import deepcopy
    print("Subject ID: %s\n" % str(subject_id))
    condition_names = ['Task-Odd', 'Task-Even']
    run_info = []
    for run_idx in range(4):
        onsets = [range(15, 240, 60), range(45, 240, 60)]
        run_info.insert(run_idx,
                        Bunch(conditions=condition_names,
                              onsets=deepcopy(onsets),
                              durations=[[15] for _ in condition_names]))
    return run_info
"""
Setup the contrast structure that needs to be evaluated. This is a list of
lists. The inner list specifies the contrasts and has the following format -
[Name,Stat,[list of condition names],[weights on those conditions]. The
condition names must match the `names` listed in the `subjectinfo` function
described above.
"""
cont1 = ['Task>Baseline','T', ['Task-Odd','Task-Even'],[0.5,0.5]]
cont2 = ['Task-Odd>Task-Even','T', ['Task-Odd','Task-Even'],[1,-1]]
cont3 = ['Task','F', [cont1, cont2]]
contrasts = [cont1,cont2]
modelspec.inputs.input_units = 'secs'
modelspec.inputs.time_repetition = TR
modelspec.inputs.high_pass_filter_cutoff = hpcutoff
modelfit.inputs.inputspec.interscan_interval = TR
modelfit.inputs.inputspec.bases = {'dgamma':{'derivs': False}}
modelfit.inputs.inputspec.contrasts = contrasts
modelfit.inputs.inputspec.model_serial_correlations = True
modelfit.inputs.inputspec.film_threshold = 1000
level1_workflow.base_dir = os.path.abspath('./fsl/workingdir')
level1_workflow.config['execution'] = dict(crashdump_dir=os.path.abspath('./fsl/crashdumps'))
level1_workflow.connect([(infosource, datasource, [('subject_id', 'subject_id')]),
(infosource, modelspec, [(('subject_id', subjectinfo),
'subject_info')]),
(datasource, preproc, [('func', 'inputspec.func')]),
])
"""
Execute the pipeline
--------------------
The code discussed above sets up all the necessary data structures with
appropriate parameters and the connectivity between the processes, but does not
generate any output. To actually run the analysis on the data the
``nipype.pipeline.engine.Pipeline.Run`` function needs to be called.
"""
if __name__ == '__main__':
#level1_workflow.write_graph()
level1_workflow.run()
#level1_workflow.run(plugin='MultiProc', plugin_args={'n_procs':2})
| bsd-3-clause |
bdh1011/wau | venv/lib/python2.7/site-packages/twisted/mail/imap4.py | 10 | 214306 | # -*- test-case-name: twisted.mail.test.test_imap -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An IMAP4 protocol implementation
@author: Jp Calderone
To do::
Suspend idle timeout while server is processing
Use an async message parser instead of buffering in memory
Figure out a way to not queue multi-message client requests (Flow? A simple callback?)
Clarify some API docs (Query, etc)
Make APPEND recognize (again) non-existent mailboxes before accepting the literal
"""
import rfc822
import base64
import binascii
import hmac
import re
import copy
import tempfile
import string
import time
import random
import types
import email.Utils
try:
import cStringIO as StringIO
except:
import StringIO
from zope.interface import implements, Interface
from twisted.protocols import basic
from twisted.protocols import policies
from twisted.internet import defer
from twisted.internet import error
from twisted.internet.defer import maybeDeferred
from twisted.python import log, text
from twisted.internet import interfaces
from twisted.cred import credentials
from twisted.cred.error import UnauthorizedLogin, UnhandledCredentials
# locale-independent month names to use instead of strftime's
_MONTH_NAMES = dict(zip(
range(1, 13),
"Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec".split()))
class MessageSet(object):
    """
    Essentially an infinite bitfield, with some extra features.

    Internally the set is a sorted list of inclusive ``(low, high)`` range
    tuples.  A bound of C{None} stands for the "*" wildcard and is resolved
    once the C{last} property has been assigned.

    @type getnext: Function taking C{int} returning C{int}
    @ivar getnext: A function that returns the next message number,
    used when iterating through the MessageSet. By default, a function
    returning the next integer is supplied, but as this can be rather
    inefficient for sparse UID iterations, it is recommended to supply
    one when messages are requested by UID. The argument is provided
    as a hint to the implementation and may be ignored if it makes sense
    to do so (eg, if an iterator is being used that maintains its own
    state, it is guaranteed that it will not be called out-of-order).
    """
    # Sentinel for "argument not supplied" -- distinct from None, which
    # means "*" (the last message).
    _empty = []

    def __init__(self, start=_empty, end=_empty):
        """
        Create a new MessageSet()

        @type start: Optional C{int}
        @param start: Start of range, or only message number

        @type end: Optional C{int}
        @param end: End of range.
        """
        self._last = self._empty # Last message/UID in use
        self.ranges = [] # List of ranges included
        self.getnext = lambda x: x+1 # A function which will return the next
        # message id. Handy for UID requests.

        if start is self._empty:
            return

        if isinstance(start, types.ListType):
            # A list of (low, high) tuples was given directly; copy it.
            self.ranges = start[:]
            self.clean()
        else:
            self.add(start,end)

    # Ooo. A property.
    def last():
        def _setLast(self, value):
            # The last-message value is write-once.
            if self._last is not self._empty:
                raise ValueError("last already set")

            self._last = value
            # Resolve any leading open-ended (None) ranges now that "*"
            # has a concrete value; sorted order puts them all in front.
            for i, (l, h) in enumerate(self.ranges):
                if l is not None:
                    break # There are no more Nones after this
                l = value
                if h is None:
                    h = value
                if l > h:
                    l, h = h, l
                self.ranges[i] = (l, h)

            self.clean()

        def _getLast(self):
            return self._last

        doc = '''
            "Highest" message number, referred to by "*".
            Must be set before attempting to use the MessageSet.
        '''
        return _getLast, _setLast, None, doc
    last = property(*last())

    def add(self, start, end=_empty):
        """
        Add another range

        @type start: C{int}
        @param start: Start of range, or only message number

        @type end: Optional C{int}
        @param end: End of range.
        """
        if end is self._empty:
            end = start

        if self._last is not self._empty:
            # "last" already known -- substitute it for "*" immediately.
            if start is None:
                start = self.last
            if end is None:
                end = self.last
        if start > end:
            # Try to keep in low, high order if possible
            # (But we don't know what None means, this will keep
            # None at the start of the ranges list)
            start, end = end, start

        self.ranges.append((start, end))
        self.clean()

    def __add__(self, other):
        if isinstance(other, MessageSet):
            ranges = self.ranges + other.ranges
            return MessageSet(ranges)
        else:
            res = MessageSet(self.ranges)
            try:
                # Accept a (start, end) pair...
                res.add(*other)
            except TypeError:
                # ...or a single message number.
                res.add(other)
            return res

    def extend(self, other):
        if isinstance(other, MessageSet):
            self.ranges.extend(other.ranges)
            self.clean()
        else:
            try:
                self.add(*other)
            except TypeError:
                self.add(other)

        return self

    def clean(self):
        """
        Clean ranges list, combining adjacent ranges
        """
        self.ranges.sort()

        oldl, oldh = None, None
        for i,(l, h) in enumerate(self.ranges):
            if l is None:
                continue
            # l is >= oldl and h is >= oldh due to sort()
            if oldl is not None and l <= oldh + 1:
                # Overlapping or adjacent: merge into this slot and void
                # the previous one (filtered out below).
                l = oldl
                h = max(oldh, h)
                self.ranges[i - 1] = None
                self.ranges[i] = (l, h)

            oldl, oldh = l, h

        self.ranges = filter(None, self.ranges)

    def __contains__(self, value):
        """
        May raise TypeError if we encounter an open-ended range
        """
        for l, h in self.ranges:
            if l is None:
                raise TypeError(
                    "Can't determine membership; last value not set")
            if l <= value <= h:
                return True

        return False

    def _iterator(self):
        for l, h in self.ranges:
            l = self.getnext(l-1)
            while l <= h:
                yield l
                # getnext may return None to signal there is no successor.
                l = self.getnext(l)
                if l is None:
                    break

    def __iter__(self):
        # Any unresolved "*" range sorts first, so checking ranges[0] is enough.
        if self.ranges and self.ranges[0][0] is None:
            raise TypeError("Can't iterate; last value not set")

        return self._iterator()

    def __len__(self):
        res = 0
        for l, h in self.ranges:
            if l is None:
                if h is None:
                    # A lone "*" counts as a single message.
                    res += 1
                else:
                    raise TypeError("Can't size object; last value not set")
            else:
                res += (h - l) + 1

        return res

    def __str__(self):
        # Render in IMAP sequence-set syntax: "n", "n:m", "n:*" or "*".
        p = []
        for low, high in self.ranges:
            if low == high:
                if low is None:
                    p.append('*')
                else:
                    p.append(str(low))
            elif low is None:
                p.append('%d:*' % (high,))
            else:
                p.append('%d:%d' % (low, high))

        return ','.join(p)

    def __repr__(self):
        return '<MessageSet %s>' % (str(self),)

    def __eq__(self, other):
        if isinstance(other, MessageSet):
            return self.ranges == other.ranges
        return False
class LiteralString:
    """Accumulate a fixed-size chunk of protocol data (a literal) in memory.

    ``write`` consumes bytes until the declared size has been received and
    hands back whatever spilled past the literal; ``callback`` then fires the
    waiting Deferred with the collected text and the rest of the line.
    """

    def __init__(self, size, defered):
        self.size = size      # bytes still expected
        self.data = []        # chunks collected so far
        self.defer = defered  # Deferred fired from callback()

    def write(self, data):
        """Consume ``data``; return any excess past the literal, else None."""
        self.size -= len(data)
        if self.size > 0:
            # Literal not complete yet: keep everything, nothing left over.
            self.data.append(data)
            return None
        # Literal complete.  A negative remaining size means the tail of
        # ``data`` belongs to the rest of the command line.
        leftover = data[self.size:] if self.size else ''
        data = data[:self.size] if self.size else data
        if data:
            self.data.append(data)
        return leftover

    def callback(self, line):
        """Fire the Deferred with (collected literal, rest of line)."""
        self.defer.callback((''.join(self.data), line))
class LiteralFile:
    """Accumulate a fixed-size literal, spilling large ones to disk.

    Literals bigger than ``_memoryFileLimit`` (~10 MB) are buffered in a
    temporary file instead of an in-memory StringIO.
    """

    _memoryFileLimit = 1024 * 1024 * 10

    def __init__(self, size, defered):
        self.size = size
        self.defer = defered
        # Choose the backing store up front, based on the declared size.
        if size > self._memoryFileLimit:
            self.data = tempfile.TemporaryFile()
        else:
            self.data = StringIO.StringIO()

    def write(self, data):
        """Consume ``data``; return any excess past the literal, else None."""
        self.size -= len(data)
        if self.size > 0:
            # Literal not complete yet: buffer everything.
            self.data.write(data)
            return None
        # Literal complete; a negative remaining size marks trailing bytes
        # that belong to the rest of the line.
        leftover = data[self.size:] if self.size else ''
        data = data[:self.size] if self.size else data
        if data:
            self.data.write(data)
        return leftover

    def callback(self, line):
        """Rewind the buffer and fire the Deferred with (file, rest of line)."""
        self.data.seek(0, 0)
        self.defer.callback((self.data, line))
class WriteBuffer:
    """Coalesce many small writes into batched transport.writeSequence calls."""

    def __init__(self, transport, size=8192):
        self.bufferSize = size
        self.transport = transport
        self._length = 0    # total bytes currently buffered
        self._writes = []   # pending chunks, in arrival order

    def write(self, s):
        self._length += len(s)
        self._writes.append(s)
        # Hand everything to the transport once the threshold is exceeded.
        if self._length > self.bufferSize:
            self.flush()

    def flush(self):
        if not self._writes:
            return
        self.transport.writeSequence(self._writes)
        self._writes = []
        self._length = 0
class Command:
    """One queued IMAP4 command plus the untagged response lines it collects.

    Untagged-response name tables: single-token responses, responses whose
    second token names them, and OK responses carrying a bracketed code.
    """

    _1_RESPONSES = ('CAPABILITY', 'FLAGS', 'LIST', 'LSUB', 'STATUS', 'SEARCH', 'NAMESPACE')
    _2_RESPONSES = ('EXISTS', 'EXPUNGE', 'FETCH', 'RECENT')
    _OK_RESPONSES = ('UIDVALIDITY', 'UNSEEN', 'READ-WRITE', 'READ-ONLY', 'UIDNEXT', 'PERMANENTFLAGS')

    defer = None

    def __init__(self, command, args=None, wantResponse=(),
                 continuation=None, *contArgs, **contKw):
        self.command = command
        self.args = args
        self.wantResponse = wantResponse
        # Freeze the extra arguments into the continuation callable.
        self.continuation = lambda x: continuation(x, *contArgs, **contKw)
        self.lines = []

    def format(self, tag):
        """Render the tagged command line to send to the server."""
        pieces = [tag, self.command]
        if self.args is not None:
            pieces.append(self.args)
        return ' '.join(pieces)

    def finish(self, lastLine, unusedCallback):
        """Deliver collected response lines and fire this command's Deferred.

        Lines recognized as belonging to this command are passed through the
        Deferred together with ``lastLine``; everything else is handed to
        ``unusedCallback``.
        """
        relevant = []
        leftovers = []
        for line in self.lines:
            parsed = parseNestedParens(line)
            count = len(parsed)
            wanted = (count >= 1 and parsed[0] in self._1_RESPONSES or
                      count >= 2 and parsed[1] in self._2_RESPONSES or
                      count >= 2 and parsed[0] == 'OK' and isinstance(parsed[1], types.ListType) and parsed[1][0] in self._OK_RESPONSES)
            if wanted:
                relevant.append(parsed)
            else:
                leftovers.append(parsed)
        d, self.defer = self.defer, None
        d.callback((relevant, lastLine))
        if leftovers:
            unusedCallback(leftovers)
class LOGINCredentials(credentials.UsernamePassword):
    """Challenge/response state for the LOGIN authentication mechanism.

    Prompts and attribute names are popped from the tail of their lists, so
    the "User Name" challenge is issued before "Password".
    """

    def __init__(self):
        self.challenges = ['Password\0', 'User Name\0']
        self.responses = ['password', 'username']
        credentials.UsernamePassword.__init__(self, None, None)

    def getChallenge(self):
        # Next prompt to send, consumed from the end of the list.
        return self.challenges.pop()

    def setResponse(self, response):
        # Store the reply on the matching UsernamePassword attribute.
        setattr(self, self.responses.pop(), response)

    def moreChallenges(self):
        return len(self.challenges) > 0
class PLAINCredentials(credentials.UsernamePassword):
    """Parser for a PLAIN authentication response (three NUL-separated parts)."""

    def __init__(self):
        credentials.UsernamePassword.__init__(self, None, None)

    def getChallenge(self):
        # PLAIN issues no server challenge.
        return ''

    def setResponse(self, response):
        fields = response.split('\0')
        if len(fields) != 3:
            raise IllegalClientResponse("Malformed Response - wrong number of parts")
        # The leading field (authorization identity) is accepted but ignored.
        _, self.username, self.password = fields

    def moreChallenges(self):
        return False
class IMAP4Exception(Exception):
    """Base class for errors raised by this IMAP4 implementation."""

    def __init__(self, *args):
        super(IMAP4Exception, self).__init__(*args)
# Raised when a client request or response is syntactically invalid.
class IllegalClientResponse(IMAP4Exception): pass

# Raised when an operation is not permitted in the current state.
class IllegalOperation(IMAP4Exception): pass

# Raised when a mailbox name uses an invalid encoding.
class IllegalMailboxEncoding(IMAP4Exception): pass
class IMailboxListener(Interface):
    """Interface for objects interested in mailbox events.

    Note: IMAP4Server itself declares that it implements this interface.
    """

    def modeChanged(writeable):
        """Indicates that the write status of a mailbox has changed.

        @type writeable: C{bool}
        @param writeable: A true value if write is now allowed, false
        otherwise.
        """

    def flagsChanged(newFlags):
        """Indicates that the flags of one or more messages have changed.

        @type newFlags: C{dict}
        @param newFlags: A mapping of message identifiers to tuples of flags
        now set on that message.
        """

    def newMessages(exists, recent):
        """Indicates that the number of messages in a mailbox has changed.

        @type exists: C{int} or C{None}
        @param exists: The total number of messages now in this mailbox.
        If the total number of messages has not changed, this should be
        C{None}.

        @type recent: C{int}
        @param recent: The number of messages now flagged \\Recent.
        If the number of recent messages has not changed, this should be
        C{None}.
        """
# Some constants to help define what an atom is and is not - see the grammar
# section of the IMAP4 RFC - <https://tools.ietf.org/html/rfc3501#section-9>.
# Some definitions (SP, CTL, DQUOTE) are also from the ABNF RFC -
# <https://tools.ietf.org/html/rfc2234>.

# The space character.
_SP = ' '
# Note: this covers 0x00-0x20 (controls plus space) and 0x80-0xFF.
_CTL = ''.join(chr(ch) for ch in range(0x21) + range(0x80, 0x100))

# It is easier to define ATOM-CHAR in terms of what it does not match than in
# terms of what it does match.
_nonAtomChars = r'(){%*"\]' + _SP + _CTL

# This is all the bytes that match the ATOM-CHAR from the grammar in the RFC.
_atomChars = ''.join(chr(ch) for ch in range(0x100) if chr(ch) not in _nonAtomChars)
class IMAP4Server(basic.LineReceiver, policies.TimeoutMixin):
    """
    Protocol implementation for an IMAP4rev1 server.
    The server can be in any of four states:
        - Non-authenticated
        - Authenticated
        - Selected
        - Logout
    """
    implements(IMailboxListener)
    # Identifier for this server software
    IDENT = 'Twisted IMAP4rev1 Ready'
    # Number of seconds before idle timeout
    # Initially 1 minute. Raised to 30 minutes after login.
    timeOut = 60
    POSTAUTH_TIMEOUT = 60 * 30
    # Whether STARTTLS has been issued successfully yet or not.
    startedTLS = False
    # Whether our transport supports TLS
    canStartTLS = False
    # Mapping of tags to commands we have received
    tags = None
    # The object which will handle logins for us
    portal = None
    # The account object for this connection
    account = None
    # Logout callback
    _onLogout = None
    # The currently selected mailbox
    mbox = None
    # Command data to be processed when literal data is received
    _pendingLiteral = None
    # Maximum length to accept for a "short" string literal
    _literalStringLimit = 4096
    # IChallengeResponse factories for AUTHENTICATE command
    challengers = None
    # Search terms the implementation of which needs to be passed both the last
    # message identifier (UID) and the last sequence id.
    _requiresLastMessageInfo = set(["OR", "NOT", "UID"])
    # Connection state: 'unauth' -> 'auth' -> 'select'; used as the prefix
    # when dispatching commands (see lookupCommand).
    state = 'unauth'
    # Line-parser state: 'command', 'pending' (awaiting a literal), or 'idle'.
    parseState = 'command'
    def __init__(self, chal = None, contextFactory = None, scheduler = None):
        """
        @param chal: Optional mapping of SASL mechanism name to
            IChallengeResponse factory for the AUTHENTICATE command.
        @param contextFactory: Optional TLS context factory enabling STARTTLS.
        @param scheduler: Optional callable used to schedule iterative work;
            defaults to iterateInReactor.
        """
        if chal is None:
            chal = {}
        self.challengers = chal
        self.ctx = contextFactory
        if scheduler is None:
            scheduler = iterateInReactor
        self._scheduler = scheduler
        # Untagged responses queued while command processing is blocked.
        self._queuedAsync = []
    def capabilities(self):
        # Build the capability mapping advertised in the greeting and in
        # CAPABILITY responses.  A None value means a bare capability name;
        # a list value is rendered as NAME=item for each item.
        cap = {'AUTH': self.challengers.keys()}
        if self.ctx and self.canStartTLS:
            # Advertise LOGINDISABLED until the link is actually secured.
            if not self.startedTLS and interfaces.ISSLTransport(self.transport, None) is None:
                cap['LOGINDISABLED'] = None
                cap['STARTTLS'] = None
        cap['NAMESPACE'] = None
        cap['IDLE'] = None
        return cap
    def connectionMade(self):
        # New connection: start the pre-auth idle timer and greet the client.
        self.tags = {}
        self.canStartTLS = interfaces.ITLSTransport(self.transport, None) is not None
        self.setTimeout(self.timeOut)
        self.sendServerGreeting()
    def connectionLost(self, reason):
        # Cancel the idle timer and fire the logout callback (if a login
        # established one) exactly once.
        self.setTimeout(None)
        if self._onLogout:
            self._onLogout()
            self._onLogout = None
    def timeoutConnection(self):
        # Idle too long: announce BYE, drop the transport, and release any
        # selected mailbox (closing it if it supports ICloseableMailbox).
        self.sendLine('* BYE Autologout; connection idle too long')
        self.transport.loseConnection()
        if self.mbox:
            self.mbox.removeListener(self)
            cmbx = ICloseableMailbox(self.mbox, None)
            if cmbx is not None:
                maybeDeferred(cmbx.close).addErrback(log.err)
            self.mbox = None
        self.state = 'timeout'
    def rawDataReceived(self, data):
        # Raw mode is active while a literal is being received; feed the
        # pending literal and switch back to line mode when it is satisfied
        # (write() returns the leftover bytes in that case).
        self.resetTimeout()
        passon = self._pendingLiteral.write(data)
        if passon is not None:
            self.setLineMode(passon)
    # Avoid processing commands while buffers are being dumped to
    # our transport
    blocked = None
    def _unblock(self):
        # Replay lines queued while output was blocked; if a replayed command
        # blocks us again, requeue whatever remains.
        commands = self.blocked
        self.blocked = None
        while commands and self.blocked is None:
            self.lineReceived(commands.pop(0))
        if self.blocked is not None:
            self.blocked.extend(commands)
    def lineReceived(self, line):
        # Queue lines while output is blocked (see _unblock); otherwise
        # dispatch to the parser for the current parse state
        # (parse_command / parse_pending / parse_idle).
        if self.blocked is not None:
            self.blocked.append(line)
            return
        self.resetTimeout()
        f = getattr(self, 'parse_' + self.parseState)
        try:
            f(line)
        except Exception, e:
            # Top-level boundary: report as an untagged BAD and log.
            self.sendUntaggedResponse('BAD Server error: ' + str(e))
            log.err()
    def parse_command(self, line):
        # Split a command line into "tag command [rest]" and dispatch it,
        # translating the protocol exceptions into tagged BAD/NO responses.
        args = line.split(None, 2)
        rest = None
        if len(args) == 3:
            tag, cmd, rest = args
        elif len(args) == 2:
            tag, cmd = args
        elif len(args) == 1:
            tag = args[0]
            self.sendBadResponse(tag, 'Missing command')
            return None
        else:
            self.sendBadResponse(None, 'Null command')
            return None
        cmd = cmd.upper()
        try:
            return self.dispatchCommand(tag, cmd, rest)
        except IllegalClientResponse, e:
            self.sendBadResponse(tag, 'Illegal syntax: ' + str(e))
        except IllegalOperation, e:
            self.sendNegativeResponse(tag, 'Illegal operation: ' + str(e))
        except IllegalMailboxEncoding, e:
            self.sendNegativeResponse(tag, 'Illegal mailbox name: ' + str(e))
    def parse_pending(self, line):
        # A line arriving in the 'pending' state completes the outstanding
        # literal's Deferred and resumes normal command parsing.
        d = self._pendingLiteral
        self._pendingLiteral = None
        self.parseState = 'command'
        d.callback(line)
    def dispatchCommand(self, tag, cmd, rest, uid=None):
        # Look up the state-specific handler tuple (handler, *arg-parsers)
        # and run it; unknown commands get a tagged BAD.
        f = self.lookupCommand(cmd)
        if f:
            fn = f[0]
            parseargs = f[1:]
            self.__doCommand(tag, fn, [self, tag], parseargs, rest, uid)
        else:
            self.sendBadResponse(tag, 'Unsupported command')
    def lookupCommand(self, cmd):
        # Handlers are class attributes named '<state>_<COMMAND>',
        # e.g. 'unauth_LOGIN' or 'select_SEARCH'.
        return getattr(self, '_'.join((self.state, cmd.upper())), None)
    def __doCommand(self, tag, handler, args, parseargs, line, uid):
        # Consume parseargs left to right: callables are argument parsers
        # (possibly deferred, e.g. for literals); non-callables are constants
        # passed through to the handler.  When all parsers are consumed the
        # handler is invoked with the accumulated arguments.
        for (i, arg) in enumerate(parseargs):
            if callable(arg):
                parseargs = parseargs[i+1:]
                maybeDeferred(arg, self, line).addCallback(
                    self.__cbDispatch, tag, handler, args,
                    parseargs, uid).addErrback(self.__ebDispatch, tag)
                return
            else:
                args.append(arg)
        if line:
            # Too many arguments
            raise IllegalClientResponse("Too many arguments for command: " + repr(line))
        if uid is not None:
            handler(uid=uid, *args)
        else:
            handler(*args)
    # A parser produced (parsed-argument, remaining-line); append the
    # argument and continue with the remaining parsers.  (Python 2 tuple
    # parameter unpacking in the signature.)
    def __cbDispatch(self, (arg, rest), tag, fn, args, parseargs, uid):
        args.append(arg)
        self.__doCommand(tag, fn, args, parseargs, rest, uid)
    def __ebDispatch(self, failure, tag):
        # Map parser failures onto the same tagged responses parse_command
        # uses for synchronously raised exceptions.
        if failure.check(IllegalClientResponse):
            self.sendBadResponse(tag, 'Illegal syntax: ' + str(failure.value))
        elif failure.check(IllegalOperation):
            self.sendNegativeResponse(tag, 'Illegal operation: ' +
                                      str(failure.value))
        elif failure.check(IllegalMailboxEncoding):
            self.sendNegativeResponse(tag, 'Illegal mailbox name: ' +
                                      str(failure.value))
        else:
            self.sendBadResponse(tag, 'Server error: ' + str(failure.value))
            log.err(failure)
    def _stringLiteral(self, size):
        # Begin receiving an in-memory string literal of the given size:
        # switch to raw mode and return a Deferred that fires with the data.
        if size > self._literalStringLimit:
            raise IllegalClientResponse(
                "Literal too long! I accept at most %d octets" %
                (self._literalStringLimit,))
        d = defer.Deferred()
        self.parseState = 'pending'
        self._pendingLiteral = LiteralString(size, d)
        self.sendContinuationRequest('Ready for %d octets of text' % size)
        self.setRawMode()
        return d
    def _fileLiteral(self, size):
        # Like _stringLiteral, but spools to a file-like object (no size
        # cap), for large literals such as APPEND message bodies.
        d = defer.Deferred()
        self.parseState = 'pending'
        self._pendingLiteral = LiteralFile(size, d)
        self.sendContinuationRequest('Ready for %d octets of data' % size)
        self.setRawMode()
        return d
    def arg_astring(self, line):
        """
        Parse an astring from the line, return (arg, rest), possibly
        via a deferred (to handle literals)
        """
        line = line.strip()
        if not line:
            raise IllegalClientResponse("Missing argument")
        d = None
        arg, rest = None, None
        if line[0] == '"':
            # Quoted string.
            try:
                spam, arg, rest = line.split('"',2)
                rest = rest[1:] # Strip space
            except ValueError:
                raise IllegalClientResponse("Unmatched quotes")
        elif line[0] == '{':
            # literal
            if line[-1] != '}':
                raise IllegalClientResponse("Malformed literal")
            try:
                size = int(line[1:-1])
            except ValueError:
                raise IllegalClientResponse("Bad literal size: " + line[1:-1])
            d = self._stringLiteral(size)
        else:
            # Bare atom: everything up to the first space.
            arg = line.split(' ',1)
            if len(arg) == 1:
                arg.append('')
            arg, rest = arg
        return d or (arg, rest)
    # ATOM: Any CHAR except ( ) { % * " \ ] CTL SP (CHAR is 7bit)
    atomre = re.compile(r'(?P<atom>[%s]+)( (?P<rest>.*$)|$)' % (re.escape(_atomChars),))
    def arg_atom(self, line):
        """
        Parse an atom from the line
        """
        if not line:
            raise IllegalClientResponse("Missing argument")
        m = self.atomre.match(line)
        if m:
            # 'rest' is None when the atom consumed the whole line.
            return m.group('atom'), m.group('rest')
        else:
            raise IllegalClientResponse("Malformed ATOM")
    def arg_plist(self, line):
        """
        Parse a (non-nested) parenthesised list from the line
        """
        if not line:
            raise IllegalClientResponse("Missing argument")
        if line[0] != "(":
            raise IllegalClientResponse("Missing parenthesis")
        i = line.find(")")
        if i == -1:
            raise IllegalClientResponse("Mismatched parenthesis")
        # i+2 skips the ')' and the following separator space.
        return (parseNestedParens(line[1:i],0), line[i+2:])
    def arg_literal(self, line):
        """
        Parse a literal from the line
        """
        if not line:
            raise IllegalClientResponse("Missing argument")
        if line[0] != '{':
            raise IllegalClientResponse("Missing literal")
        if line[-1] != '}':
            raise IllegalClientResponse("Malformed literal")
        try:
            size = int(line[1:-1])
        except ValueError:
            raise IllegalClientResponse("Bad literal size: " + line[1:-1])
        # Returns a Deferred firing with (file-like object, rest-of-line).
        return self._fileLiteral(size)
    def arg_searchkeys(self, line):
        """
        searchkeys
        """
        # Consumes the whole remaining line as one nested-paren query.
        query = parseNestedParens(line)
        # XXX Should really use list of search terms and parse into
        # a proper tree
        return (query, '')
    def arg_seqset(self, line):
        """
        sequence-set
        """
        rest = ''
        arg = line.split(' ',1)
        if len(arg) == 2:
            rest = arg[1]
        arg = arg[0]
        try:
            return (parseIdList(arg), rest)
        except IllegalIdentifierError, e:
            raise IllegalClientResponse("Bad message number " + str(e))
    def arg_fetchatt(self, line):
        """
        fetch-att
        """
        # Delegates to the stateful FETCH attribute parser; consumes the
        # whole remaining line.
        p = _FetchParser()
        p.parseString(line)
        return (p.result, '')
    def arg_flaglist(self, line):
        """
        Flag part of store-att-flag
        """
        flags = []
        if line[0] == '(':
            if line[-1] != ')':
                raise IllegalClientResponse("Mismatched parenthesis")
            line = line[1:-1]
        while line:
            m = self.atomre.search(line)
            if not m:
                raise IllegalClientResponse("Malformed flag")
            # System flags are written with a leading backslash, which is
            # not an atom character, so the atom match starts at index 1.
            if line[0] == '\\' and m.start() == 1:
                flags.append('\\' + m.group('atom'))
            elif m.start() == 0:
                flags.append(m.group('atom'))
            else:
                raise IllegalClientResponse("Malformed flag")
            line = m.group('rest')
        return (flags, '')
    def arg_line(self, line):
        """
        Command line of UID command
        """
        # Passes the remainder through unparsed for re-dispatch.
        return (line, '')
    def opt_plist(self, line):
        """
        Optional parenthesised list
        """
        # Yields (None, line) when the optional list is absent.
        if line.startswith('('):
            return self.arg_plist(line)
        else:
            return (None, line)
def opt_datetime(self, line):
"""
Optional date-time string
"""
if line.startswith('"'):
try:
spam, date, rest = line.split('"',2)
except IndexError:
raise IllegalClientResponse("Malformed date-time")
return (date, rest[1:])
else:
return (None, line)
    def opt_charset(self, line):
        """
        Optional charset of SEARCH command
        """
        if line[:7].upper() == 'CHARSET':
            arg = line.split(' ',2)
            if len(arg) == 1:
                raise IllegalClientResponse("Missing charset identifier")
            if len(arg) == 2:
                arg.append('')
            # Parts are ('CHARSET', identifier, rest-of-line).
            spam, arg, rest = arg
            return (arg, rest)
        else:
            return (None, line)
    def sendServerGreeting(self):
        # Untagged OK greeting carrying the capability list inline.
        msg = '[CAPABILITY %s] %s' % (' '.join(self.listCapabilities()), self.IDENT)
        self.sendPositiveResponse(message=msg)
    # Tagged (or untagged, when tag is None) BAD / OK / NO responses.
    def sendBadResponse(self, tag = None, message = ''):
        self._respond('BAD', tag, message)
    def sendPositiveResponse(self, tag = None, message = ''):
        self._respond('OK', tag, message)
    def sendNegativeResponse(self, tag = None, message = ''):
        self._respond('NO', tag, message)
    # NOTE: 'async' is part of the public signature (py3.7+ reserved word;
    # this is Python 2 code).  Async responses are queued while command
    # processing is blocked and flushed by the next tagged response.
    def sendUntaggedResponse(self, message, async=False):
        if not async or (self.blocked is None):
            self._respond(message, None, None)
        else:
            self._queuedAsync.append(message)
    def sendContinuationRequest(self, msg = 'Ready for additional command text'):
        # '+' continuation line, optionally carrying human-readable text.
        if msg:
            self.sendLine('+ ' + msg)
        else:
            self.sendLine('+')
    def _respond(self, state, tag, message):
        # A completing tagged response first flushes any queued async
        # untagged responses, then writes '<tag> <state> [message]'.
        if state in ('OK', 'NO', 'BAD') and self._queuedAsync:
            lines = self._queuedAsync
            self._queuedAsync = []
            for msg in lines:
                self._respond(msg, None, None)
        if not tag:
            tag = '*'
        if message:
            self.sendLine(' '.join((tag, state, message)))
        else:
            self.sendLine(' '.join((tag, state)))
    def listCapabilities(self):
        # Flatten capabilities() into the wire form: bare names for None
        # values, NAME=value pairs for list values.
        caps = ['IMAP4rev1']
        for c, v in self.capabilities().iteritems():
            if v is None:
                caps.append(c)
            elif len(v):
                caps.extend([('%s=%s' % (c, cap)) for cap in v])
        return caps
    def do_CAPABILITY(self, tag):
        # CAPABILITY is valid in every connection state.
        self.sendUntaggedResponse('CAPABILITY ' + ' '.join(self.listCapabilities()))
        self.sendPositiveResponse(tag, 'CAPABILITY completed')
    unauth_CAPABILITY = (do_CAPABILITY,)
    auth_CAPABILITY = unauth_CAPABILITY
    select_CAPABILITY = unauth_CAPABILITY
    logout_CAPABILITY = unauth_CAPABILITY
    def do_LOGOUT(self, tag):
        # BYE then tagged OK, per RFC 3501, before dropping the transport.
        self.sendUntaggedResponse('BYE Nice talking to you')
        self.sendPositiveResponse(tag, 'LOGOUT successful')
        self.transport.loseConnection()
    unauth_LOGOUT = (do_LOGOUT,)
    auth_LOGOUT = unauth_LOGOUT
    select_LOGOUT = unauth_LOGOUT
    logout_LOGOUT = unauth_LOGOUT
    def do_NOOP(self, tag):
        self.sendPositiveResponse(tag, 'NOOP No operation performed')
    unauth_NOOP = (do_NOOP,)
    auth_NOOP = unauth_NOOP
    select_NOOP = unauth_NOOP
    logout_NOOP = unauth_NOOP
    def do_AUTHENTICATE(self, tag, args):
        # args is the SASL mechanism name; mechanisms are keyed upper-case
        # in self.challengers.
        args = args.upper().strip()
        if args not in self.challengers:
            self.sendNegativeResponse(tag, 'AUTHENTICATE method unsupported')
        else:
            self.authenticate(self.challengers[args](), tag)
    unauth_AUTHENTICATE = (do_AUTHENTICATE, arg_atom)
    def authenticate(self, chal, tag):
        # Without a portal there is nothing to authenticate against.
        if self.portal is None:
            self.sendNegativeResponse(tag, 'Temporary authentication failure')
            return
        self._setupChallenge(chal, tag)
    def _setupChallenge(self, chal, tag):
        # Send one base64-encoded challenge as a continuation line and wait
        # for the client's response via the pending-literal machinery.
        try:
            challenge = chal.getChallenge()
        except Exception, e:
            self.sendBadResponse(tag, 'Server error: ' + str(e))
        else:
            # [:-1] drops the trailing newline encodestring appends.
            coded = base64.encodestring(challenge)[:-1]
            self.parseState = 'pending'
            self._pendingLiteral = defer.Deferred()
            self.sendContinuationRequest(coded)
            self._pendingLiteral.addCallback(self.__cbAuthChunk, chal, tag)
            self._pendingLiteral.addErrback(self.__ebAuthChunk, tag)
    def __cbAuthChunk(self, result, chal, tag):
        # Decode the client's base64 response; loop for multi-round
        # mechanisms, otherwise hand the completed credentials to the portal.
        try:
            uncoded = base64.decodestring(result)
        except binascii.Error:
            raise IllegalClientResponse("Malformed Response - not base64")
        chal.setResponse(uncoded)
        if chal.moreChallenges():
            self._setupChallenge(chal, tag)
        else:
            self.portal.login(chal, None, IAccount).addCallbacks(
                self.__cbAuthResp,
                self.__ebAuthResp,
                (tag,), None, (tag,), None
            )
    # Successful portal login: record the avatar, move to the 'auth' state,
    # and relax the idle timeout.  (Python 2 tuple parameter unpacking.)
    def __cbAuthResp(self, (iface, avatar, logout), tag):
        assert iface is IAccount, "IAccount is the only supported interface"
        self.account = avatar
        self.state = 'auth'
        self._onLogout = logout
        self.sendPositiveResponse(tag, 'Authentication successful')
        self.setTimeout(self.POSTAUTH_TIMEOUT)
    def __ebAuthResp(self, failure, tag):
        # Distinguish bad credentials from server misconfiguration; anything
        # else is logged and reported generically.
        if failure.check(UnauthorizedLogin):
            self.sendNegativeResponse(tag, 'Authentication failed: unauthorized')
        elif failure.check(UnhandledCredentials):
            self.sendNegativeResponse(tag, 'Authentication failed: server misconfigured')
        else:
            self.sendBadResponse(tag, 'Server error: login failed unexpectedly')
            log.err(failure)
    def __ebAuthChunk(self, failure, tag):
        # Challenge-response exchange failed mid-stream.
        self.sendNegativeResponse(tag, 'Authentication failed: ' + str(failure.value))
    def do_STARTTLS(self, tag):
        if self.startedTLS:
            self.sendNegativeResponse(tag, 'TLS already negotiated')
        elif self.ctx and self.canStartTLS:
            # OK must be sent in the clear before the handshake begins.
            self.sendPositiveResponse(tag, 'Begin TLS negotiation now')
            self.transport.startTLS(self.ctx)
            self.startedTLS = True
            # Copy before mutating so the factory-shared mapping is untouched;
            # plaintext-equivalent mechanisms are safe once TLS is up.
            self.challengers = self.challengers.copy()
            if 'LOGIN' not in self.challengers:
                self.challengers['LOGIN'] = LOGINCredentials
            if 'PLAIN' not in self.challengers:
                self.challengers['PLAIN'] = PLAINCredentials
        else:
            self.sendNegativeResponse(tag, 'TLS not available')
    unauth_STARTTLS = (do_STARTTLS,)
    def do_LOGIN(self, tag, user, passwd):
        # Refuse plaintext LOGIN while LOGINDISABLED is advertised
        # (i.e. before STARTTLS on a TLS-capable connection).
        if 'LOGINDISABLED' in self.capabilities():
            self.sendBadResponse(tag, 'LOGIN is disabled before STARTTLS')
            return
        maybeDeferred(self.authenticateLogin, user, passwd
            ).addCallback(self.__cbLogin, tag
            ).addErrback(self.__ebLogin, tag
            )
    unauth_LOGIN = (do_LOGIN, arg_astring, arg_astring)
    def authenticateLogin(self, user, passwd):
        """Lookup the account associated with the given parameters
        Override this method to define the desired authentication behavior.
        The default behavior is to defer authentication to C{self.portal}
        if it is not None, or to deny the login otherwise.
        @type user: C{str}
        @param user: The username to lookup
        @type passwd: C{str}
        @param passwd: The password to login with
        """
        if self.portal:
            return self.portal.login(
                credentials.UsernamePassword(user, passwd),
                None, IAccount
            )
        # No portal configured: always deny.
        raise UnauthorizedLogin()
    # Successful LOGIN: same state transition as __cbAuthResp.
    # (Python 2 tuple parameter unpacking in the signature.)
    def __cbLogin(self, (iface, avatar, logout), tag):
        if iface is not IAccount:
            self.sendBadResponse(tag, 'Server error: login returned unexpected value')
            log.err("__cbLogin called with %r, IAccount expected" % (iface,))
        else:
            self.account = avatar
            self._onLogout = logout
            self.sendPositiveResponse(tag, 'LOGIN succeeded')
            self.state = 'auth'
            self.setTimeout(self.POSTAUTH_TIMEOUT)
    def __ebLogin(self, failure, tag):
        # Bad credentials get a NO; anything unexpected is logged as BAD.
        if failure.check(UnauthorizedLogin):
            self.sendNegativeResponse(tag, 'LOGIN failed')
        else:
            self.sendBadResponse(tag, 'Server error: ' + str(failure.value))
            log.err(failure)
    def do_NAMESPACE(self, tag):
        # NAMESPACE (RFC 2342): respond with (personal, other-users, shared).
        personal = public = shared = None
        np = INamespacePresenter(self.account, None)
        if np is not None:
            personal = np.getPersonalNamespaces()
            # NOTE(review): both 'public' and 'shared' are filled from
            # getSharedNamespaces(); if INamespacePresenter has a distinct
            # accessor for other-users' namespaces this looks like a
            # copy/paste slip -- confirm against the interface definition.
            public = np.getSharedNamespaces()
            shared = np.getSharedNamespaces()
        self.sendUntaggedResponse('NAMESPACE ' + collapseNestedLists([personal, public, shared]))
        self.sendPositiveResponse(tag, "NAMESPACE command completed")
    auth_NAMESPACE = (do_NAMESPACE,)
    select_NAMESPACE = auth_NAMESPACE
def _parseMbox(self, name):
if isinstance(name, unicode):
return name
try:
return name.decode('imap4-utf-7')
except:
log.err()
raise IllegalMailboxEncoding(name)
def _selectWork(self, tag, name, rw, cmdName):
if self.mbox:
self.mbox.removeListener(self)
cmbx = ICloseableMailbox(self.mbox, None)
if cmbx is not None:
maybeDeferred(cmbx.close).addErrback(log.err)
self.mbox = None
self.state = 'auth'
name = self._parseMbox(name)
maybeDeferred(self.account.select, self._parseMbox(name), rw
).addCallback(self._cbSelectWork, cmdName, tag
).addErrback(self._ebSelectWork, cmdName, tag
)
    def _ebSelectWork(self, failure, cmdName, tag):
        # account.select failed unexpectedly; log and report generically.
        self.sendBadResponse(tag, "%s failed: Server error" % (cmdName,))
        log.err(failure)
    def _cbSelectWork(self, mbox, cmdName, tag):
        # Mailbox opened (or None): emit the untagged SELECT/EXAMINE data
        # (EXISTS, RECENT, FLAGS, UIDVALIDITY) and enter the 'select' state.
        if mbox is None:
            self.sendNegativeResponse(tag, 'No such mailbox')
            return
        if '\\noselect' in [s.lower() for s in mbox.getFlags()]:
            self.sendNegativeResponse(tag, 'Mailbox cannot be selected')
            return
        flags = mbox.getFlags()
        self.sendUntaggedResponse(str(mbox.getMessageCount()) + ' EXISTS')
        self.sendUntaggedResponse(str(mbox.getRecentCount()) + ' RECENT')
        self.sendUntaggedResponse('FLAGS (%s)' % ' '.join(flags))
        self.sendPositiveResponse(None, '[UIDVALIDITY %d]' % mbox.getUIDValidity())
        s = mbox.isWriteable() and 'READ-WRITE' or 'READ-ONLY'
        mbox.addListener(self)
        self.sendPositiveResponse(tag, '[%s] %s successful' % (s, cmdName))
        self.state = 'select'
        self.mbox = mbox
    auth_SELECT = ( _selectWork, arg_astring, 1, 'SELECT' )
    select_SELECT = auth_SELECT
    auth_EXAMINE = ( _selectWork, arg_astring, 0, 'EXAMINE' )
    select_EXAMINE = auth_EXAMINE
    def do_IDLE(self, tag):
        # Enter IDLE (RFC 2177): remember where to resume parsing; the
        # tagged response is deferred until the client sends DONE.
        self.sendContinuationRequest(None)
        self.parseTag = tag
        self.lastState = self.parseState
        self.parseState = 'idle'
    def parse_idle(self, *args):
        # Any line (the client's DONE) terminates IDLE and restores the
        # previous parse state.
        self.parseState = self.lastState
        del self.lastState
        self.sendPositiveResponse(self.parseTag, "IDLE terminated")
        del self.parseTag
    select_IDLE = ( do_IDLE, )
    auth_IDLE = select_IDLE
def do_CREATE(self, tag, name):
name = self._parseMbox(name)
try:
result = self.account.create(name)
except MailboxException, c:
self.sendNegativeResponse(tag, str(c))
except:
self.sendBadResponse(tag, "Server error encountered while creating mailbox")
log.err()
else:
if result:
self.sendPositiveResponse(tag, 'Mailbox created')
else:
self.sendNegativeResponse(tag, 'Mailbox not created')
auth_CREATE = (do_CREATE, arg_astring)
select_CREATE = auth_CREATE
def do_DELETE(self, tag, name):
name = self._parseMbox(name)
if name.lower() == 'inbox':
self.sendNegativeResponse(tag, 'You cannot delete the inbox')
return
try:
self.account.delete(name)
except MailboxException, m:
self.sendNegativeResponse(tag, str(m))
except:
self.sendBadResponse(tag, "Server error encountered while deleting mailbox")
log.err()
else:
self.sendPositiveResponse(tag, 'Mailbox deleted')
auth_DELETE = (do_DELETE, arg_astring)
select_DELETE = auth_DELETE
def do_RENAME(self, tag, oldname, newname):
oldname, newname = [self._parseMbox(n) for n in oldname, newname]
if oldname.lower() == 'inbox' or newname.lower() == 'inbox':
self.sendNegativeResponse(tag, 'You cannot rename the inbox, or rename another mailbox to inbox.')
return
try:
self.account.rename(oldname, newname)
except TypeError:
self.sendBadResponse(tag, 'Invalid command syntax')
except MailboxException, m:
self.sendNegativeResponse(tag, str(m))
except:
self.sendBadResponse(tag, "Server error encountered while renaming mailbox")
log.err()
else:
self.sendPositiveResponse(tag, 'Mailbox renamed')
auth_RENAME = (do_RENAME, arg_astring, arg_astring)
select_RENAME = auth_RENAME
def do_SUBSCRIBE(self, tag, name):
name = self._parseMbox(name)
try:
self.account.subscribe(name)
except MailboxException, m:
self.sendNegativeResponse(tag, str(m))
except:
self.sendBadResponse(tag, "Server error encountered while subscribing to mailbox")
log.err()
else:
self.sendPositiveResponse(tag, 'Subscribed')
auth_SUBSCRIBE = (do_SUBSCRIBE, arg_astring)
select_SUBSCRIBE = auth_SUBSCRIBE
def do_UNSUBSCRIBE(self, tag, name):
name = self._parseMbox(name)
try:
self.account.unsubscribe(name)
except MailboxException, m:
self.sendNegativeResponse(tag, str(m))
except:
self.sendBadResponse(tag, "Server error encountered while unsubscribing from mailbox")
log.err()
else:
self.sendPositiveResponse(tag, 'Unsubscribed')
auth_UNSUBSCRIBE = (do_UNSUBSCRIBE, arg_astring)
select_UNSUBSCRIBE = auth_UNSUBSCRIBE
    def _listWork(self, tag, ref, mbox, sub, cmdName):
        # Shared implementation of LIST (sub=0) and LSUB (sub=1).
        mbox = self._parseMbox(mbox)
        maybeDeferred(self.account.listMailboxes, ref, mbox
            ).addCallback(self._cbListWork, tag, sub, cmdName
            ).addErrback(self._ebListWork, tag
            )
    def _cbListWork(self, mailboxes, tag, sub, cmdName):
        # One untagged LIST/LSUB line per (subscribed, when sub) mailbox.
        for (name, box) in mailboxes:
            if not sub or self.account.isSubscribed(name):
                flags = box.getFlags()
                delim = box.getHierarchicalDelimiter()
                resp = (DontQuoteMe(cmdName), map(DontQuoteMe, flags), delim, name.encode('imap4-utf-7'))
                self.sendUntaggedResponse(collapseNestedLists(resp))
        self.sendPositiveResponse(tag, '%s completed' % (cmdName,))
    def _ebListWork(self, failure, tag):
        # listMailboxes failed unexpectedly.
        self.sendBadResponse(tag, "Server error encountered while listing mailboxes.")
        log.err(failure)
    auth_LIST = (_listWork, arg_astring, arg_astring, 0, 'LIST')
    select_LIST = auth_LIST
    auth_LSUB = (_listWork, arg_astring, arg_astring, 1, 'LSUB')
    select_LSUB = auth_LSUB
    def do_STATUS(self, tag, mailbox, names):
        # STATUS opens the mailbox read-only without selecting it.
        mailbox = self._parseMbox(mailbox)
        maybeDeferred(self.account.select, mailbox, 0
            ).addCallback(self._cbStatusGotMailbox, tag, mailbox, names
            ).addErrback(self._ebStatusGotMailbox, tag
            )
    def _cbStatusGotMailbox(self, mbox, tag, mailbox, names):
        # Ask the mailbox for the requested status items.
        if mbox:
            maybeDeferred(mbox.requestStatus, names).addCallbacks(
                self.__cbStatus, self.__ebStatus,
                (tag, mailbox), None, (tag, mailbox), None
            )
        else:
            self.sendNegativeResponse(tag, "Could not open mailbox")
    def _ebStatusGotMailbox(self, failure, tag):
        # account.select failed unexpectedly during STATUS.
        self.sendBadResponse(tag, "Server error encountered while opening mailbox.")
        log.err(failure)
    auth_STATUS = (do_STATUS, arg_astring, arg_plist)
    select_STATUS = auth_STATUS
    def __cbStatus(self, status, tag, box):
        # status is a mapping of item name to value.
        line = ' '.join(['%s %s' % x for x in status.iteritems()])
        self.sendUntaggedResponse('STATUS %s (%s)' % (box, line))
        self.sendPositiveResponse(tag, 'STATUS complete')
    def __ebStatus(self, failure, tag, box):
        # requestStatus failed (e.g. an unsupported status item).
        self.sendBadResponse(tag, 'STATUS %s failed: %s' % (box, str(failure.value)))
    def do_APPEND(self, tag, mailbox, flags, date, message):
        # flags and date are optional (opt_plist / opt_datetime); message is
        # a file-like object produced by arg_literal.
        mailbox = self._parseMbox(mailbox)
        maybeDeferred(self.account.select, mailbox
            ).addCallback(self._cbAppendGotMailbox, tag, flags, date, message
            ).addErrback(self._ebAppendGotMailbox, tag
            )
    def _cbAppendGotMailbox(self, mbox, tag, flags, date, message):
        # [TRYCREATE] hints that the client may CREATE then retry (RFC 3501).
        if not mbox:
            self.sendNegativeResponse(tag, '[TRYCREATE] No such mailbox')
            return
        d = mbox.addMessage(message, flags, date)
        d.addCallback(self.__cbAppend, tag, mbox)
        d.addErrback(self.__ebAppend, tag)
    def _ebAppendGotMailbox(self, failure, tag):
        # account.select failed unexpectedly during APPEND.
        self.sendBadResponse(tag, "Server error encountered while opening mailbox.")
        log.err(failure)
    auth_APPEND = (do_APPEND, arg_astring, opt_plist, opt_datetime,
                   arg_literal)
    select_APPEND = auth_APPEND
    def __cbAppend(self, result, tag, mbox):
        # Report the new message count before completing the command.
        self.sendUntaggedResponse('%d EXISTS' % mbox.getMessageCount())
        self.sendPositiveResponse(tag, 'APPEND complete')
    def __ebAppend(self, failure, tag):
        # addMessage failed.
        self.sendBadResponse(tag, 'APPEND failed: ' + str(failure.value))
    def do_CHECK(self, tag):
        # checkpoint() may complete synchronously (None) or via a Deferred.
        d = self.checkpoint()
        if d is None:
            self.__cbCheck(None, tag)
        else:
            d.addCallbacks(
                self.__cbCheck,
                self.__ebCheck,
                callbackArgs=(tag,),
                errbackArgs=(tag,)
            )
    select_CHECK = (do_CHECK,)
    # Success / failure reporting for CHECK.
    def __cbCheck(self, result, tag):
        self.sendPositiveResponse(tag, 'CHECK completed')
    def __ebCheck(self, failure, tag):
        self.sendBadResponse(tag, 'CHECK failed: ' + str(failure.value))
    def checkpoint(self):
        """Called when the client issues a CHECK command.
        This should perform any checkpoint operations required by the server.
        It may be a long running operation, but may not block. If it returns
        a deferred, the client will only be informed of success (or failure)
        when the deferred's callback (or errback) is invoked.
        """
        # Default implementation: no checkpoint work; CHECK succeeds
        # immediately.
        return None
    def do_CLOSE(self, tag):
        # CLOSE expunges a writeable mailbox first, then closes it if it
        # supports ICloseableMailbox, chaining the two when both apply.
        d = None
        if self.mbox.isWriteable():
            d = maybeDeferred(self.mbox.expunge)
        cmbx = ICloseableMailbox(self.mbox, None)
        if cmbx is not None:
            if d is not None:
                d.addCallback(lambda result: cmbx.close())
            else:
                d = maybeDeferred(cmbx.close)
        if d is not None:
            d.addCallbacks(self.__cbClose, self.__ebClose, (tag,), None, (tag,), None)
        else:
            self.__cbClose(None, tag)
    select_CLOSE = (do_CLOSE,)
    def __cbClose(self, result, tag):
        # Deselect the mailbox and return to the 'auth' state.
        self.sendPositiveResponse(tag, 'CLOSE completed')
        self.mbox.removeListener(self)
        self.mbox = None
        self.state = 'auth'
    def __ebClose(self, failure, tag):
        self.sendBadResponse(tag, 'CLOSE failed: ' + str(failure.value))
    def do_EXPUNGE(self, tag):
        # EXPUNGE only applies to a read-write mailbox.
        if self.mbox.isWriteable():
            maybeDeferred(self.mbox.expunge).addCallbacks(
                self.__cbExpunge, self.__ebExpunge, (tag,), None, (tag,), None
            )
        else:
            self.sendNegativeResponse(tag, 'EXPUNGE ignored on read-only mailbox')
    select_EXPUNGE = (do_EXPUNGE,)
    def __cbExpunge(self, result, tag):
        # result is the sequence of expunged message numbers.
        for e in result:
            self.sendUntaggedResponse('%d EXPUNGE' % e)
        self.sendPositiveResponse(tag, 'EXPUNGE completed')
    def __ebExpunge(self, failure, tag):
        self.sendBadResponse(tag, 'EXPUNGE failed: ' + str(failure.value))
        log.err(failure)
    def do_SEARCH(self, tag, charset, query, uid=0):
        # Prefer the mailbox's own search implementation; otherwise fetch
        # every message and filter server-side (__cbManualSearch).
        sm = ISearchableMailbox(self.mbox, None)
        if sm is not None:
            maybeDeferred(sm.search, query, uid=uid
                          ).addCallback(self.__cbSearch, tag, self.mbox, uid
                          ).addErrback(self.__ebSearch, tag)
        else:
            # that's not the ideal way to get all messages, there should be a
            # method on mailboxes that gives you all of them
            s = parseIdList('1:*')
            maybeDeferred(self.mbox.fetch, s, uid=uid
                          ).addCallback(self.__cbManualSearch,
                                        tag, self.mbox, query, uid
                          ).addErrback(self.__ebSearch, tag)
    select_SEARCH = (do_SEARCH, opt_charset, arg_searchkeys)
    def __cbSearch(self, result, tag, mbox, uid):
        # For UID SEARCH, translate sequence numbers to UIDs before replying.
        if uid:
            result = map(mbox.getUID, result)
        ids = ' '.join([str(i) for i in result])
        self.sendUntaggedResponse('SEARCH ' + ids)
        self.sendPositiveResponse(tag, 'SEARCH completed')
    def __cbManualSearch(self, result, tag, mbox, query, uid,
                         searchResults=None):
        """
        Apply the search filter to a set of messages. Send the response to the
        client.
        @type result: C{list} of C{tuple} of (C{int}, provider of
            L{imap4.IMessage})
        @param result: A list two tuples of messages with their sequence ids,
            sorted by the ids in descending order.
        @type tag: C{str}
        @param tag: A command tag.
        @type mbox: Provider of L{imap4.IMailbox}
        @param mbox: The searched mailbox.
        @type query: C{list}
        @param query: A list representing the parsed form of the search query.
        @param uid: A flag indicating whether the search is over message
            sequence numbers or UIDs.
        @type searchResults: C{list}
        @param searchResults: The search results so far or C{None} if no
            results yet.
        """
        if searchResults is None:
            searchResults = []
        i = 0
        # result is a list of tuples (sequenceId, Message)
        lastSequenceId = result and result[-1][0]
        lastMessageId = result and result[-1][1].getUID()
        # Process at most five messages per pass; the remainder is handled
        # in a later reactor iteration (see callLater below).
        for (i, (id, msg)) in zip(range(5), result):
            # searchFilter and singleSearchStep will mutate the query.  Dang.
            # Copy it here or else things will go poorly for subsequent
            # messages.
            if self._searchFilter(copy.deepcopy(query), id, msg,
                                  lastSequenceId, lastMessageId):
                if uid:
                    searchResults.append(str(msg.getUID()))
                else:
                    searchResults.append(str(id))
        if i == 4:
            from twisted.internet import reactor
            reactor.callLater(
                0, self.__cbManualSearch, result[5:], tag, mbox, query, uid,
                searchResults)
        else:
            if searchResults:
                self.sendUntaggedResponse('SEARCH ' + ' '.join(searchResults))
            self.sendPositiveResponse(tag, 'SEARCH completed')
    def _searchFilter(self, query, id, msg, lastSequenceId, lastMessageId):
        """
        Pop search terms from the beginning of C{query} until there are none
        left and apply them to the given message.
        @param query: A list representing the parsed form of the search query.
        @param id: The sequence number of the message being checked.
        @param msg: The message being checked.
        @type lastSequenceId: C{int}
        @param lastSequenceId: The highest sequence number of any message in
            the mailbox being searched.
        @type lastMessageId: C{int}
        @param lastMessageId: The highest UID of any message in the mailbox
            being searched.
        @return: Boolean indicating whether all of the query terms match the
            message.
        """
        # Terms are implicitly ANDed: fail fast on the first miss.
        while query:
            if not self._singleSearchStep(query, id, msg,
                                          lastSequenceId, lastMessageId):
                return False
        return True
    def _singleSearchStep(self, query, id, msg, lastSequenceId, lastMessageId):
        """
        Pop one search term from the beginning of C{query} (possibly more than
        one element) and return whether it matches the given message.
        @param query: A list representing the parsed form of the search query.
        @param id: The sequence number of the message being checked.
        @param msg: The message being checked.
        @param lastSequenceId: The highest sequence number of any message in
            the mailbox being searched.
        @param lastMessageId: The highest UID of any message in the mailbox
            being searched.
        @return: Boolean indicating whether the query term matched the message.
        """
        q = query.pop(0)
        if isinstance(q, list):
            # A parenthesised sub-query: all of its terms must match.
            if not self._searchFilter(q, id, msg,
                                      lastSequenceId, lastMessageId):
                return False
        else:
            c = q.upper()
            if not c[:1].isalpha():
                # A search term may be a word like ALL, ANSWERED, BCC, etc (see
                # below) or it may be a message sequence set.  Here we
                # recognize a message sequence set "N:M".
                messageSet = parseIdList(c, lastSequenceId)
                return id in messageSet
            else:
                # Dispatch to the search_<TERM> method; some terms also need
                # the last sequence number / UID (see _requiresLastMessageInfo).
                f = getattr(self, 'search_' + c, None)
                if f is None:
                    raise IllegalQueryError("Invalid search command %s" % c)
                if c in self._requiresLastMessageInfo:
                    result = f(query, id, msg, (lastSequenceId,
                                                lastMessageId))
                else:
                    result = f(query, id, msg)
                if not result:
                    return False
        return True
    def search_ALL(self, query, id, msg):
        """
        Returns C{True} if the message matches the ALL search key (always).
        @type query: A C{list} of C{str}
        @param query: A list representing the parsed query string.
        @type id: C{int}
        @param id: The sequence number of the message being checked.
        @type msg: Provider of L{imap4.IMessage}
        """
        # ALL matches unconditionally and consumes no further query terms.
        return True
    def search_ANSWERED(self, query, id, msg):
        """
        Returns C{True} if the message has been answered.
        @type query: A C{list} of C{str}
        @param query: A list representing the parsed query string.
        @type id: C{int}
        @param id: The sequence number of the message being checked.
        @type msg: Provider of L{imap4.IMessage}
        """
        return '\\Answered' in msg.getFlags()
    def search_BCC(self, query, id, msg):
        """
        Returns C{True} if the message has a BCC address matching the query.
        @type query: A C{list} of C{str}
        @param query: A list whose first element is a BCC C{str}
        @type id: C{int}
        @param id: The sequence number of the message being checked.
        @type msg: Provider of L{imap4.IMessage}
        """
        # Case-insensitive substring match against the Bcc header.
        bcc = msg.getHeaders(False, 'bcc').get('bcc', '')
        return bcc.lower().find(query.pop(0).lower()) != -1
    def search_BEFORE(self, query, id, msg):
        # Compare internal date (as a time tuple) against the query date.
        date = parseTime(query.pop(0))
        return rfc822.parsedate(msg.getInternalDate()) < date
    def search_BODY(self, query, id, msg):
        # Case-insensitive substring search over the message body file.
        body = query.pop(0).lower()
        return text.strFile(body, msg.getBodyFile(), False)
def search_CC(self, query, id, msg):
cc = msg.getHeaders(False, 'cc').get('cc', '')
return cc.lower().find(query.pop(0).lower()) != -1
    # Flag-based predicates: match when the corresponding system flag is set.
    def search_DELETED(self, query, id, msg):
        return '\\Deleted' in msg.getFlags()
    def search_DRAFT(self, query, id, msg):
        return '\\Draft' in msg.getFlags()
    def search_FLAGGED(self, query, id, msg):
        return '\\Flagged' in msg.getFlags()
def search_FROM(self, query, id, msg):
fm = msg.getHeaders(False, 'from').get('from', '')
return fm.lower().find(query.pop(0).lower()) != -1
    def search_HEADER(self, query, id, msg):
        # Consumes two terms: the header name, then the substring to match.
        hdr = query.pop(0).lower()
        hdr = msg.getHeaders(False, hdr).get(hdr, '')
        return hdr.lower().find(query.pop(0).lower()) != -1
def search_KEYWORD(self, query, id, msg):
query.pop(0)
return False
def search_LARGER(self, query, id, msg):
return int(query.pop(0)) < msg.getSize()
def search_NEW(self, query, id, msg):
return '\\Recent' in msg.getFlags() and '\\Seen' not in msg.getFlags()
    def search_NOT(self, query, id, msg, (lastSequenceId, lastMessageId)):
        """
        Returns C{True} if the message does not match the query.
        @type query: A C{list} of C{str}
        @param query: A list representing the parsed form of the search query.
        @type id: C{int}
        @param id: The sequence number of the message being checked.
        @type msg: Provider of L{imap4.IMessage}
        @param msg: The message being checked.
        @type lastSequenceId: C{int}
        @param lastSequenceId: The highest sequence number of a message in the
            mailbox.
        @type lastMessageId: C{int}
        @param lastMessageId: The highest UID of a message in the mailbox.
        """
        # NOTE: the fourth parameter uses Python 2 tuple unpacking (removed
        # by PEP 3113); this module targets Python 2.
        # Delegate evaluation of the nested search key and invert its result.
        return not self._singleSearchStep(query, id, msg,
                                          lastSequenceId, lastMessageId)
def search_OLD(self, query, id, msg):
return '\\Recent' not in msg.getFlags()
def search_ON(self, query, id, msg):
date = parseTime(query.pop(0))
return rfc822.parsedate(msg.getInternalDate()) == date
    def search_OR(self, query, id, msg, (lastSequenceId, lastMessageId)):
        """
        Returns C{True} if the message matches any of the first two query
        items.
        @type query: A C{list} of C{str}
        @param query: A list representing the parsed form of the search query.
        @type id: C{int}
        @param id: The sequence number of the message being checked.
        @type msg: Provider of L{imap4.IMessage}
        @param msg: The message being checked.
        @type lastSequenceId: C{int}
        @param lastSequenceId: The highest sequence number of a message in the
            mailbox.
        @type lastMessageId: C{int}
        @param lastMessageId: The highest UID of a message in the mailbox.
        """
        # Both operands must be evaluated (each consumes its portion of the
        # query list), so short-circuiting with `or` directly is not used.
        a = self._singleSearchStep(query, id, msg,
                                   lastSequenceId, lastMessageId)
        b = self._singleSearchStep(query, id, msg,
                                   lastSequenceId, lastMessageId)
        return a or b
def search_RECENT(self, query, id, msg):
return '\\Recent' in msg.getFlags()
def search_SEEN(self, query, id, msg):
return '\\Seen' in msg.getFlags()
def search_SENTBEFORE(self, query, id, msg):
"""
Returns C{True} if the message date is earlier than the query date.
@type query: A C{list} of C{str}
@param query: A list whose first element starts with a stringified date
that is a fragment of an L{imap4.Query()}. The date must be in the
format 'DD-Mon-YYYY', for example '03-March-2003' or '03-Mar-2003'.
@type id: C{int}
@param id: The sequence number of the message being checked.
@type msg: Provider of L{imap4.IMessage}
"""
date = msg.getHeaders(False, 'date').get('date', '')
date = rfc822.parsedate(date)
return date < parseTime(query.pop(0))
def search_SENTON(self, query, id, msg):
"""
Returns C{True} if the message date is the same as the query date.
@type query: A C{list} of C{str}
@param query: A list whose first element starts with a stringified date
that is a fragment of an L{imap4.Query()}. The date must be in the
format 'DD-Mon-YYYY', for example '03-March-2003' or '03-Mar-2003'.
@type msg: Provider of L{imap4.IMessage}
"""
date = msg.getHeaders(False, 'date').get('date', '')
date = rfc822.parsedate(date)
return date[:3] == parseTime(query.pop(0))[:3]
def search_SENTSINCE(self, query, id, msg):
"""
Returns C{True} if the message date is later than the query date.
@type query: A C{list} of C{str}
@param query: A list whose first element starts with a stringified date
that is a fragment of an L{imap4.Query()}. The date must be in the
format 'DD-Mon-YYYY', for example '03-March-2003' or '03-Mar-2003'.
@type msg: Provider of L{imap4.IMessage}
"""
date = msg.getHeaders(False, 'date').get('date', '')
date = rfc822.parsedate(date)
return date > parseTime(query.pop(0))
def search_SINCE(self, query, id, msg):
date = parseTime(query.pop(0))
return rfc822.parsedate(msg.getInternalDate()) > date
def search_SMALLER(self, query, id, msg):
return int(query.pop(0)) > msg.getSize()
def search_SUBJECT(self, query, id, msg):
subj = msg.getHeaders(False, 'subject').get('subject', '')
return subj.lower().find(query.pop(0).lower()) != -1
def search_TEXT(self, query, id, msg):
# XXX - This must search headers too
body = query.pop(0).lower()
return text.strFile(body, msg.getBodyFile(), False)
def search_TO(self, query, id, msg):
to = msg.getHeaders(False, 'to').get('to', '')
return to.lower().find(query.pop(0).lower()) != -1
    def search_UID(self, query, id, msg, (lastSequenceId, lastMessageId)):
        """
        Returns C{True} if the message UID is in the range defined by the
        search query.
        @type query: A C{list} of C{str}
        @param query: A list representing the parsed form of the search
            query. Its first element should be a C{str} that can be interpreted
            as a sequence range, for example '2:4,5:*'.
        @type id: C{int}
        @param id: The sequence number of the message being checked.
        @type msg: Provider of L{imap4.IMessage}
        @param msg: The message being checked.
        @type lastSequenceId: C{int}
        @param lastSequenceId: The highest sequence number of a message in the
            mailbox.
        @type lastMessageId: C{int}
        @param lastMessageId: The highest UID of a message in the mailbox.
        """
        # Python 2 tuple-unpacking parameter (removed by PEP 3113).
        c = query.pop(0)
        # '*' in the range is resolved against the mailbox's highest UID.
        m = parseIdList(c, lastMessageId)
        return msg.getUID() in m
def search_UNANSWERED(self, query, id, msg):
return '\\Answered' not in msg.getFlags()
def search_UNDELETED(self, query, id, msg):
return '\\Deleted' not in msg.getFlags()
def search_UNDRAFT(self, query, id, msg):
return '\\Draft' not in msg.getFlags()
def search_UNFLAGGED(self, query, id, msg):
return '\\Flagged' not in msg.getFlags()
def search_UNKEYWORD(self, query, id, msg):
query.pop(0)
return False
def search_UNSEEN(self, query, id, msg):
return '\\Seen' not in msg.getFlags()
    def __ebSearch(self, failure, tag):
        # Report a SEARCH failure to the client, then log the traceback
        # server-side for diagnosis.
        self.sendBadResponse(tag, 'SEARCH failed: ' + str(failure.value))
        log.err(failure)
def do_FETCH(self, tag, messages, query, uid=0):
if query:
self._oldTimeout = self.setTimeout(None)
maybeDeferred(self.mbox.fetch, messages, uid=uid
).addCallback(iter
).addCallback(self.__cbFetch, tag, query, uid
).addErrback(self.__ebFetch, tag
)
else:
self.sendPositiveResponse(tag, 'FETCH complete')
select_FETCH = (do_FETCH, arg_seqset, arg_fetchatt)
    def __cbFetch(self, results, tag, query, uid):
        """
        Deliver one fetched message to the client, then reschedule itself
        for the next one until the result iterator is exhausted.
        """
        # Block any commands arriving while FETCH results are in flight.
        if self.blocked is None:
            self.blocked = []
        try:
            # Python 2 iterator protocol; next(results) in Python 3.
            id, msg = results.next()
        except StopIteration:
            # The idle timeout was suspended while we delivered results,
            # restore it now.
            self.setTimeout(self._oldTimeout)
            del self._oldTimeout

            # All results have been processed, deliver completion notification.

            # It's important to run this *after* resetting the timeout to "rig
            # a race" in some test code. writing to the transport will
            # synchronously call test code, which synchronously loses the
            # connection, calling our connectionLost method, which cancels the
            # timeout. We want to make sure that timeout is cancelled *after*
            # we reset it above, so that the final state is no timed
            # calls. This avoids reactor uncleanliness errors in the test
            # suite.
            # XXX: Perhaps loopback should be fixed to not call the user code
            # synchronously in transport.write?
            self.sendPositiveResponse(tag, 'FETCH completed')

            # Instance state is now consistent again (ie, it is as though
            # the fetch command never ran), so allow any pending blocked
            # commands to execute.
            self._unblock()
        else:
            self.spewMessage(id, msg, query, uid
                ).addCallback(lambda _: self.__cbFetch(results, tag, query, uid)
                ).addErrback(self.__ebSpewMessage
                )
    def __ebSpewMessage(self, failure):
        """
        Handle a failure raised while streaming FETCH results to the client.
        """
        # This indicates a programming error.
        # There's no reliable way to indicate anything to the client, since we
        # may have already written an arbitrary amount of data in response to
        # the command.
        log.err(failure)
        self.transport.loseConnection()
def spew_envelope(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
_w('ENVELOPE ' + collapseNestedLists([getEnvelope(msg)]))
def spew_flags(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
_w('FLAGS ' + '(%s)' % (' '.join(msg.getFlags())))
def spew_internaldate(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
idate = msg.getInternalDate()
ttup = rfc822.parsedate_tz(idate)
if ttup is None:
log.msg("%d:%r: unpareseable internaldate: %r" % (id, msg, idate))
raise IMAP4Exception("Internal failure generating INTERNALDATE")
# need to specify the month manually, as strftime depends on locale
strdate = time.strftime("%d-%%s-%Y %H:%M:%S ", ttup[:9])
odate = strdate % (_MONTH_NAMES[ttup[1]],)
if ttup[9] is None:
odate = odate + "+0000"
else:
if ttup[9] >= 0:
sign = "+"
else:
sign = "-"
odate = odate + sign + str(((abs(ttup[9]) // 3600) * 100 + (abs(ttup[9]) % 3600) // 60)).zfill(4)
_w('INTERNALDATE ' + _quote(odate))
def spew_rfc822header(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
hdrs = _formatHeaders(msg.getHeaders(True))
_w('RFC822.HEADER ' + _literal(hdrs))
    def spew_rfc822text(self, id, msg, _w=None, _f=None):
        # Emit the RFC822.TEXT FETCH attribute: flush buffered output (_f)
        # and then stream the message body directly to the transport via a
        # producer; the returned Deferred fires when production completes.
        if _w is None:
            _w = self.transport.write
        _w('RFC822.TEXT ')
        _f()
        return FileProducer(msg.getBodyFile()
            ).beginProducing(self.transport
            )
def spew_rfc822size(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
_w('RFC822.SIZE ' + str(msg.getSize()))
    def spew_rfc822(self, id, msg, _w=None, _f=None):
        # Emit the RFC822 FETCH attribute (the complete message).  Prefer
        # the IMessageFile adapter, which streams the raw file directly;
        # fall back to MessageProducer, which re-serializes the message.
        if _w is None:
            _w = self.transport.write
        _w('RFC822 ')
        _f()
        mf = IMessageFile(msg, None)
        if mf is not None:
            return FileProducer(mf.open()
                ).beginProducing(self.transport
                )
        return MessageProducer(msg, None, self._scheduler
            ).beginProducing(self.transport
            )
def spew_uid(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
_w('UID ' + str(msg.getUID()))
def spew_bodystructure(self, id, msg, _w=None, _f=None):
_w('BODYSTRUCTURE ' + collapseNestedLists([getBodyStructure(msg, True)]))
    def spew_body(self, part, id, msg, _w=None, _f=None):
        """
        Write a BODY[...] FETCH attribute: descend to the requested
        subpart of C{msg} and emit its headers, text, MIME headers, or
        full content depending on which section C{part} requests.
        """
        if _w is None:
            _w = self.transport.write
        # Walk down the part-number path (e.g. BODY[1.2]) to the subpart.
        for p in part.part:
            if msg.isMultipart():
                msg = msg.getSubPart(p)
            elif p > 0:
                # Non-multipart messages have an implicit first part but no
                # other parts - reject any request for any other part.
                raise TypeError("Requested subpart of non-multipart message")
        if part.header:
            # BODY[HEADER] / BODY[HEADER.FIELDS(...)]: selected headers only.
            hdrs = msg.getHeaders(part.header.negate, *part.header.fields)
            hdrs = _formatHeaders(hdrs)
            _w(str(part) + ' ' + _literal(hdrs))
        elif part.text:
            # BODY[TEXT]: stream the body via a producer.
            _w(str(part) + ' ')
            _f()
            return FileProducer(msg.getBodyFile()
                ).beginProducing(self.transport
                )
        elif part.mime:
            # BODY[MIME]: all headers of this (sub)part.
            hdrs = _formatHeaders(msg.getHeaders(True))
            _w(str(part) + ' ' + _literal(hdrs))
        elif part.empty:
            # BODY[] / BODY[n]: the entire (sub)part content.
            _w(str(part) + ' ')
            _f()
            if part.part:
                return FileProducer(msg.getBodyFile()
                    ).beginProducing(self.transport
                    )
            else:
                # Prefer the raw-file adapter; fall back to re-serializing.
                mf = IMessageFile(msg, None)
                if mf is not None:
                    return FileProducer(mf.open()).beginProducing(self.transport)
                return MessageProducer(msg, None, self._scheduler).beginProducing(self.transport)
        else:
            # Bare BODY request: emit the (non-extension) body structure.
            _w('BODY ' + collapseNestedLists([getBodyStructure(msg)]))
    def spewMessage(self, id, msg, query, uid):
        """
        Emit one untagged FETCH response for message C{id}, writing each
        requested attribute in turn.  Returns the Deferred produced by
        scheduling the C{spew} generator, which yields once per attribute
        so large bodies can be produced incrementally.
        """
        wbuf = WriteBuffer(self.transport)
        write = wbuf.write
        flush = wbuf.flush
        def start():
            write('* %d FETCH (' % (id,))
        def finish():
            write(')\r\n')
        def space():
            write(' ')

        def spew():
            seenUID = False
            start()
            for part in query:
                if part.type == 'uid':
                    seenUID = True
                if part.type == 'body':
                    # BODY takes the part spec as an extra leading argument.
                    yield self.spew_body(part, id, msg, write, flush)
                else:
                    f = getattr(self, 'spew_' + part.type)
                    yield f(id, msg, write, flush)
                if part is not query[-1]:
                    space()
            # UID FETCH responses must include the UID even when the client
            # did not explicitly request it.
            if uid and not seenUID:
                space()
                yield self.spew_uid(id, msg, write, flush)
            finish()
            flush()
        return self._scheduler(spew())
    def __ebFetch(self, failure, tag):
        # A FETCH failed: restore the idle timeout that do_FETCH suspended,
        # log the error, and report the failure to the client.
        self.setTimeout(self._oldTimeout)
        del self._oldTimeout
        log.err(failure)
        self.sendBadResponse(tag, 'FETCH failed: ' + str(failure.value))
def do_STORE(self, tag, messages, mode, flags, uid=0):
mode = mode.upper()
silent = mode.endswith('SILENT')
if mode.startswith('+'):
mode = 1
elif mode.startswith('-'):
mode = -1
else:
mode = 0
maybeDeferred(self.mbox.store, messages, flags, mode, uid=uid).addCallbacks(
self.__cbStore, self.__ebStore, (tag, self.mbox, uid, silent), None, (tag,), None
)
select_STORE = (do_STORE, arg_seqset, arg_atom, arg_flaglist)
    def __cbStore(self, result, tag, mbox, uid, silent):
        # Unless the client used a .SILENT store, echo the new flags of each
        # modified message as untagged FETCH responses (including the UID
        # when this was a UID STORE), then acknowledge the command.
        if result and not silent:
            for (k, v) in result.iteritems():
                if uid:
                    uidstr = ' UID %d' % mbox.getUID(k)
                else:
                    uidstr = ''
                self.sendUntaggedResponse('%d FETCH (FLAGS (%s)%s)' %
                                          (k, ' '.join(v), uidstr))
        self.sendPositiveResponse(tag, 'STORE completed')
def __ebStore(self, failure, tag):
self.sendBadResponse(tag, 'Server error: ' + str(failure.value))
def do_COPY(self, tag, messages, mailbox, uid=0):
mailbox = self._parseMbox(mailbox)
maybeDeferred(self.account.select, mailbox
).addCallback(self._cbCopySelectedMailbox, tag, messages, mailbox, uid
).addErrback(self._ebCopySelectedMailbox, tag
)
select_COPY = (do_COPY, arg_seqset, arg_astring)
def _cbCopySelectedMailbox(self, mbox, tag, messages, mailbox, uid):
if not mbox:
self.sendNegativeResponse(tag, 'No such mailbox: ' + mailbox)
else:
maybeDeferred(self.mbox.fetch, messages, uid
).addCallback(self.__cbCopy, tag, mbox
).addCallback(self.__cbCopied, tag, mbox
).addErrback(self.__ebCopy, tag
)
def _ebCopySelectedMailbox(self, failure, tag):
self.sendBadResponse(tag, 'Server error: ' + str(failure.value))
    def __cbCopy(self, messages, tag, mbox):
        """
        Copy each fetched message into C{mbox}, preferring the destination's
        fast IMessageCopier path; returns a DeferredList of the per-message
        add operations.
        """
        # XXX - This should handle failures with a rollback or something
        addedDeferreds = []

        fastCopyMbox = IMessageCopier(mbox, None)
        for (id, msg) in messages:
            if fastCopyMbox is not None:
                # Destination knows how to copy a message object directly.
                d = maybeDeferred(fastCopyMbox.copy, msg)
                addedDeferreds.append(d)
                continue

            # XXX - The following should be an implementation of IMessageCopier.copy
            # on an IMailbox->IMessageCopier adapter.

            flags = msg.getFlags()
            date = msg.getInternalDate()

            body = IMessageFile(msg, None)
            if body is not None:
                # Raw message file available; add it directly.
                bodyFile = body.open()
                d = maybeDeferred(mbox.addMessage, bodyFile, flags, date)
            else:
                # Serialize the message into a temporary file, then add it.
                def rewind(f):
                    f.seek(0)
                    return f
                buffer = tempfile.TemporaryFile()
                d = MessageProducer(msg, buffer, self._scheduler
                    ).beginProducing(None
                    ).addCallback(lambda _, b=buffer, f=flags, d=date: mbox.addMessage(rewind(b), f, d)
                    )
            addedDeferreds.append(d)
        return defer.DeferredList(addedDeferreds)
def __cbCopied(self, deferredIds, tag, mbox):
ids = []
failures = []
for (status, result) in deferredIds:
if status:
ids.append(result)
else:
failures.append(result.value)
if failures:
self.sendNegativeResponse(tag, '[ALERT] Some messages were not copied')
else:
self.sendPositiveResponse(tag, 'COPY completed')
def __ebCopy(self, failure, tag):
self.sendBadResponse(tag, 'COPY failed:' + str(failure.value))
log.err(failure)
def do_UID(self, tag, command, line):
command = command.upper()
if command not in ('COPY', 'FETCH', 'STORE', 'SEARCH'):
raise IllegalClientResponse(command)
self.dispatchCommand(tag, command, line, uid=1)
select_UID = (do_UID, arg_atom, arg_line)
#
# IMailboxListener implementation
#
    def modeChanged(self, writeable):
        # IMailboxListener callback: tell the client the selected mailbox
        # switched between read-write and read-only.  Note 'async' is a plain
        # keyword argument here; this module targets Python 2, where 'async'
        # is not a reserved word.
        if writeable:
            self.sendUntaggedResponse(message='[READ-WRITE]', async=True)
        else:
            self.sendUntaggedResponse(message='[READ-ONLY]', async=True)
    def flagsChanged(self, newFlags):
        # IMailboxListener callback: announce new flag values for each
        # changed message as untagged FETCH responses.
        for (mId, flags) in newFlags.iteritems():
            msg = '%d FETCH (FLAGS (%s))' % (mId, ' '.join(flags))
            self.sendUntaggedResponse(msg, async=True)
    def newMessages(self, exists, recent):
        # IMailboxListener callback: announce updated message counts via
        # untagged EXISTS / RECENT responses; None means "unchanged".
        if exists is not None:
            self.sendUntaggedResponse('%d EXISTS' % exists, async=True)
        if recent is not None:
            self.sendUntaggedResponse('%d RECENT' % recent, async=True)
class UnhandledResponse(IMAP4Exception):
    """Raised when a server response cannot be handled."""
class NegativeResponse(IMAP4Exception):
    """Raised when the server responds negatively to a command."""
class NoSupportedAuthentication(IMAP4Exception):
    """
    Raised when none of the authentication schemes offered by the server
    is supported by this client.
    """
    def __init__(self, serverSupports, clientSupports):
        IMAP4Exception.__init__(self, 'No supported authentication schemes available')
        self.serverSupports = serverSupports
        self.clientSupports = clientSupports

    def __str__(self):
        base = IMAP4Exception.__str__(self)
        detail = ': Server supports %r, client supports %r' % (
            self.serverSupports, self.clientSupports)
        return base + detail
class IllegalServerResponse(IMAP4Exception):
    """Raised when the server sends a response the client cannot parse."""
# Shared failure value delivered to pending command Deferreds when the
# connection times out (see IMAP4Client.timeoutConnection).
TIMEOUT_ERROR = error.TimeoutError()
class IMAP4Client(basic.LineReceiver, policies.TimeoutMixin):
"""IMAP4 client protocol implementation
@ivar state: A string representing the state the connection is currently
in.
"""
implements(IMailboxListener)
tags = None
waiting = None
queued = None
tagID = 1
state = None
startedTLS = False
# Number of seconds to wait before timing out a connection.
# If the number is <= 0 no timeout checking will be performed.
timeout = 0
# Capabilities are not allowed to change during the session
# So cache the first response and use that for all later
# lookups
_capCache = None
_memoryFileLimit = 1024 * 1024 * 10
# Authentication is pluggable. This maps names to IClientAuthentication
# objects.
authenticators = None
STATUS_CODES = ('OK', 'NO', 'BAD', 'PREAUTH', 'BYE')
STATUS_TRANSFORMATIONS = {
'MESSAGES': int, 'RECENT': int, 'UNSEEN': int
}
context = None
def __init__(self, contextFactory = None):
self.tags = {}
self.queued = []
self.authenticators = {}
self.context = contextFactory
self._tag = None
self._parts = None
self._lastCmd = None
def registerAuthenticator(self, auth):
"""Register a new form of authentication
When invoking the authenticate() method of IMAP4Client, the first
matching authentication scheme found will be used. The ordering is
that in which the server lists support authentication schemes.
@type auth: Implementor of C{IClientAuthentication}
@param auth: The object to use to perform the client
side of this authentication scheme.
"""
self.authenticators[auth.getName().upper()] = auth
def rawDataReceived(self, data):
if self.timeout > 0:
self.resetTimeout()
self._pendingSize -= len(data)
if self._pendingSize > 0:
self._pendingBuffer.write(data)
else:
passon = ''
if self._pendingSize < 0:
data, passon = data[:self._pendingSize], data[self._pendingSize:]
self._pendingBuffer.write(data)
rest = self._pendingBuffer
self._pendingBuffer = None
self._pendingSize = None
rest.seek(0, 0)
self._parts.append(rest.read())
self.setLineMode(passon.lstrip('\r\n'))
# def sendLine(self, line):
# print 'S:', repr(line)
# return basic.LineReceiver.sendLine(self, line)
def _setupForLiteral(self, rest, octets):
self._pendingBuffer = self.messageFile(octets)
self._pendingSize = octets
if self._parts is None:
self._parts = [rest, '\r\n']
else:
self._parts.extend([rest, '\r\n'])
self.setRawMode()
    def connectionMade(self):
        # Begin idle-timeout tracking once connected; a timeout <= 0
        # disables timeout checking entirely.
        if self.timeout > 0:
            self.setTimeout(self.timeout)
    def connectionLost(self, reason):
        """We are no longer connected"""
        # Stop the idle timeout and errback every queued and in-flight
        # command with the disconnect reason so callers are not left hanging.
        if self.timeout > 0:
            self.setTimeout(None)
        if self.queued is not None:
            queued = self.queued
            self.queued = None
            for cmd in queued:
                cmd.defer.errback(reason)
        if self.tags is not None:
            tags = self.tags
            self.tags = None
            for cmd in tags.itervalues():
                if cmd is not None and cmd.defer is not None:
                    cmd.defer.errback(reason)
    def lineReceived(self, line):
        """
        Attempt to parse a single line from the server.

        @type line: C{str}
        @param line: The line from the server, without the line delimiter.

        @raise IllegalServerResponse: If the line or some part of the line
            does not represent an allowed message from the server at this time.
        """
#        print 'C: ' + repr(line)
        if self.timeout > 0:
            self.resetTimeout()

        # A trailing '{N}' announces an N-octet literal to follow.
        lastPart = line.rfind('{')
        if lastPart != -1:
            lastPart = line[lastPart + 1:]
            if lastPart.endswith('}'):
                # It's a literal a-comin' in
                try:
                    octets = int(lastPart[:-1])
                except ValueError:
                    raise IllegalServerResponse(line)
                if self._parts is None:
                    # First line of this response; remember its tag.
                    self._tag, parts = line.split(None, 1)
                else:
                    parts = line
                self._setupForLiteral(parts, octets)
                return

        if self._parts is None:
            # It isn't a literal at all
            self._regularDispatch(line)
        else:
            # If an expression is in progress, no tag is required here
            # Since we didn't find a literal indicator, this expression
            # is done.
            self._parts.append(line)
            tag, rest = self._tag, ''.join(self._parts)
            self._tag = self._parts = None
            self.dispatchCommand(tag, rest)
def timeoutConnection(self):
if self._lastCmd and self._lastCmd.defer is not None:
d, self._lastCmd.defer = self._lastCmd.defer, None
d.errback(TIMEOUT_ERROR)
if self.queued:
for cmd in self.queued:
if cmd.defer is not None:
d, cmd.defer = cmd.defer, d
d.errback(TIMEOUT_ERROR)
self.transport.loseConnection()
def _regularDispatch(self, line):
parts = line.split(None, 1)
if len(parts) != 2:
parts.append('')
tag, rest = parts
self.dispatchCommand(tag, rest)
def messageFile(self, octets):
"""Create a file to which an incoming message may be written.
@type octets: C{int}
@param octets: The number of octets which will be written to the file
@rtype: Any object which implements C{write(string)} and
C{seek(int, int)}
@return: A file-like object
"""
if octets > self._memoryFileLimit:
return tempfile.TemporaryFile()
else:
return StringIO.StringIO()
def makeTag(self):
tag = '%0.4X' % self.tagID
self.tagID += 1
return tag
def dispatchCommand(self, tag, rest):
if self.state is None:
f = self.response_UNAUTH
else:
f = getattr(self, 'response_' + self.state.upper(), None)
if f:
try:
f(tag, rest)
except:
log.err()
self.transport.loseConnection()
else:
log.err("Cannot dispatch: %s, %s, %s" % (self.state, tag, rest))
self.transport.loseConnection()
    def response_UNAUTH(self, tag, rest):
        """
        Handle lines received before authentication, including the initial
        server greeting (OK or PREAUTH), which sets the connection state.
        """
        if self.state is None:
            # Server greeting, this is
            status, rest = rest.split(None, 1)
            if status.upper() == 'OK':
                self.state = 'unauth'
            elif status.upper() == 'PREAUTH':
                # Server pre-authenticated us; skip straight to auth state.
                self.state = 'auth'
            else:
                # XXX - This is rude.
                self.transport.loseConnection()
                raise IllegalServerResponse(tag + ' ' + rest)

            # A bracketed response code in the greeting may carry the
            # server's capability list; parse and hand it to serverGreeting.
            b, e = rest.find('['), rest.find(']')
            if b != -1 and e != -1:
                self.serverGreeting(
                    self.__cbCapabilities(
                        ([parseNestedParens(rest[b + 1:e])], None)))
            else:
                self.serverGreeting(None)
        else:
            self._defaultHandler(tag, rest)
def response_AUTH(self, tag, rest):
self._defaultHandler(tag, rest)
    def _defaultHandler(self, tag, rest):
        """
        Default response routing: untagged ('*') and continuation ('+')
        lines are attached to the in-flight command (or treated as
        unsolicited info); tagged lines complete the matching command.
        """
        if tag == '*' or tag == '+':
            if not self.waiting:
                # No command in flight; this is unsolicited server chatter.
                self._extraInfo([parseNestedParens(rest)])
            else:
                cmd = self.tags[self.waiting]
                if tag == '+':
                    # Continuation request; let the command respond.
                    cmd.continuation(rest)
                else:
                    cmd.lines.append(rest)
        else:
            try:
                cmd = self.tags[tag]
            except KeyError:
                # XXX - This is rude.
                self.transport.loseConnection()
                raise IllegalServerResponse(tag + ' ' + rest)
            else:
                status, line = rest.split(None, 1)
                if status == 'OK':
                    # Give them this last line, too
                    cmd.finish(rest, self._extraInfo)
                else:
                    cmd.defer.errback(IMAP4Exception(line))
                # Command completed; release the pipeline for the next one.
                del self.tags[tag]
                self.waiting = None
                self._flushQueue()
def _flushQueue(self):
if self.queued:
cmd = self.queued.pop(0)
t = self.makeTag()
self.tags[t] = cmd
self.sendLine(cmd.format(t))
self.waiting = t
    def _extraInfo(self, lines):
        """
        Interpret unsolicited/extra untagged responses and forward them to
        the IMailboxListener callbacks (modeChanged, flagsChanged,
        newMessages).
        """
        # XXX - This is terrible.
        # XXX - Also, this should collapse temporally proximate calls into single
        # invocations of IMailboxListener methods, where possible.
        flags = {}
        recent = exists = None
        for response in lines:
            elements = len(response)
            if elements == 1 and response[0] == ['READ-ONLY']:
                self.modeChanged(False)
            elif elements == 1 and response[0] == ['READ-WRITE']:
                self.modeChanged(True)
            elif elements == 2 and response[1] == 'EXISTS':
                exists = int(response[0])
            elif elements == 2 and response[1] == 'RECENT':
                recent = int(response[0])
            elif elements == 3 and response[1] == 'FETCH':
                mId = int(response[0])
                values = self._parseFetchPairs(response[2])
                flags.setdefault(mId, []).extend(values.get('FLAGS', ()))
            else:
                log.msg('Unhandled unsolicited response: %s' % (response,))

        if flags:
            self.flagsChanged(flags)
        if recent is not None or exists is not None:
            self.newMessages(exists, recent)
def sendCommand(self, cmd):
cmd.defer = defer.Deferred()
if self.waiting:
self.queued.append(cmd)
return cmd.defer
t = self.makeTag()
self.tags[t] = cmd
self.sendLine(cmd.format(t))
self.waiting = t
self._lastCmd = cmd
return cmd.defer
def getCapabilities(self, useCache=1):
"""Request the capabilities available on this server.
This command is allowed in any state of connection.
@type useCache: C{bool}
@param useCache: Specify whether to use the capability-cache or to
re-retrieve the capabilities from the server. Server capabilities
should never change, so for normal use, this flag should never be
false.
@rtype: C{Deferred}
@return: A deferred whose callback will be invoked with a
dictionary mapping capability types to lists of supported
mechanisms, or to None if a support list is not applicable.
"""
if useCache and self._capCache is not None:
return defer.succeed(self._capCache)
cmd = 'CAPABILITY'
resp = ('CAPABILITY',)
d = self.sendCommand(Command(cmd, wantResponse=resp))
d.addCallback(self.__cbCapabilities)
return d
    def __cbCapabilities(self, (lines, tagline)):
        """
        Parse CAPABILITY response lines into a dict mapping capability
        names to lists of values (or None for valueless capabilities),
        and cache the result.
        """
        # Python 2 tuple-unpacking parameter (removed by PEP 3113).
        caps = {}
        for rest in lines:
            for cap in rest[1:]:
                # 'NAME=value' capabilities are grouped under NAME.
                parts = cap.split('=', 1)
                if len(parts) == 1:
                    category, value = parts[0], None
                else:
                    category, value = parts
                caps.setdefault(category, []).append(value)

        # Preserve a non-ideal API for backwards compatibility.  It would
        # probably be entirely sensible to have an object with a wider API than
        # dict here so this could be presented less insanely.
        for category in caps:
            if caps[category] == [None]:
                caps[category] = None

        self._capCache = caps
        return caps
def logout(self):
"""Inform the server that we are done with the connection.
This command is allowed in any state of connection.
@rtype: C{Deferred}
@return: A deferred whose callback will be invoked with None
when the proper server acknowledgement has been received.
"""
d = self.sendCommand(Command('LOGOUT', wantResponse=('BYE',)))
d.addCallback(self.__cbLogout)
return d
    def __cbLogout(self, (lines, tagline)):
        # Python 2 tuple-unpacking parameter.  Once LOGOUT is acknowledged,
        # just drop the connection.
        self.transport.loseConnection()
        # We don't particularly care what the server said
        return None
def noop(self):
"""Perform no operation.
This command is allowed in any state of connection.
@rtype: C{Deferred}
@return: A deferred whose callback will be invoked with a list
of untagged status updates the server responds with.
"""
d = self.sendCommand(Command('NOOP'))
d.addCallback(self.__cbNoop)
return d
    def __cbNoop(self, (lines, tagline)):
        # Python 2 tuple-unpacking parameter.
        # Conceivably, this is elidable.
        # It is, after all, a no-op.
        return lines
    def startTLS(self, contextFactory=None):
        """
        Initiates a 'STARTTLS' request and negotiates the TLS / SSL
        Handshake.

        @param contextFactory: The TLS / SSL Context Factory to
        leverage.  If the contextFactory is None the IMAP4Client will
        either use the current TLS / SSL Context Factory or attempt to
        create a new one.

        @type contextFactory: C{ssl.ClientContextFactory}

        @return: A Deferred which fires when the transport has been
        secured according to the given contextFactory, or which fails
        if the transport cannot be secured.
        """
        assert not self.startedTLS, "Client and Server are currently communicating via TLS"

        if contextFactory is None:
            contextFactory = self._getContextFactory()

        if contextFactory is None:
            return defer.fail(IMAP4Exception(
                "IMAP4Client requires a TLS context to "
                "initiate the STARTTLS handshake"))

        # NOTE(review): this assumes capabilities were already fetched; if
        # _capCache is still None, the membership test below would raise
        # TypeError — confirm against callers.
        if 'STARTTLS' not in self._capCache:
            return defer.fail(IMAP4Exception(
                "Server does not support secure communication "
                "via TLS / SSL"))

        tls = interfaces.ITLSTransport(self.transport, None)
        if tls is None:
            return defer.fail(IMAP4Exception(
                "IMAP4Client transport does not implement "
                "interfaces.ITLSTransport"))

        d = self.sendCommand(Command('STARTTLS'))
        d.addCallback(self._startedTLS, contextFactory)
        # Capabilities may differ after the TLS handshake; re-fetch them.
        d.addCallback(lambda _: self.getCapabilities())
        return d
def authenticate(self, secret):
"""Attempt to enter the authenticated state with the server
This command is allowed in the Non-Authenticated state.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked if the authentication
succeeds and whose errback will be invoked otherwise.
"""
if self._capCache is None:
d = self.getCapabilities()
else:
d = defer.succeed(self._capCache)
d.addCallback(self.__cbAuthenticate, secret)
return d
    def __cbAuthenticate(self, caps, secret):
        """
        Pick the first advertised AUTH scheme we have an authenticator
        for; if none match, try upgrading to TLS first (which may expose
        more schemes) before giving up.
        """
        auths = caps.get('AUTH', ())
        for scheme in auths:
            if scheme.upper() in self.authenticators:
                cmd = Command('AUTHENTICATE', scheme, (),
                              self.__cbContinueAuth, scheme,
                              secret)
                return self.sendCommand(cmd)

        if self.startedTLS:
            # Already on TLS and still nothing usable; give up.
            return defer.fail(NoSupportedAuthentication(
                auths, self.authenticators.keys()))
        else:
            def ebStartTLS(err):
                err.trap(IMAP4Exception)
                # We couldn't negotiate TLS for some reason
                return defer.fail(NoSupportedAuthentication(
                    auths, self.authenticators.keys()))

            d = self.startTLS()
            d.addErrback(ebStartTLS)
            d.addCallback(lambda _: self.getCapabilities())
            # Retry scheme selection with the post-TLS capability list.
            d.addCallback(self.__cbAuthTLS, secret)
            return d
    def __cbContinueAuth(self, rest, scheme, secret):
        """
        Answer the server's base64-encoded authentication challenge using
        the registered authenticator for C{scheme}.
        """
        try:
            # NOTE: base64.decodestring is the Python 2 spelling
            # (removed in Python 3.9; decodebytes is the replacement).
            chal = base64.decodestring(rest + '\n')
        except binascii.Error:
            # Malformed challenge: abort the AUTHENTICATE exchange ('*').
            self.sendLine('*')
            raise IllegalServerResponse(rest)
        else:
            auth = self.authenticators[scheme]
            chal = auth.challengeResponse(secret, chal)
            self.sendLine(base64.encodestring(chal).strip())
def __cbAuthTLS(self, caps, secret):
auths = caps.get('AUTH', ())
for scheme in auths:
if scheme.upper() in self.authenticators:
cmd = Command('AUTHENTICATE', scheme, (),
self.__cbContinueAuth, scheme,
secret)
return self.sendCommand(cmd)
raise NoSupportedAuthentication(auths, self.authenticators.keys())
def login(self, username, password):
"""Authenticate with the server using a username and password
This command is allowed in the Non-Authenticated state. If the
server supports the STARTTLS capability and our transport supports
TLS, TLS is negotiated before the login command is issued.
A more secure way to log in is to use C{startTLS} or
C{authenticate} or both.
@type username: C{str}
@param username: The username to log in with
@type password: C{str}
@param password: The password to log in with
@rtype: C{Deferred}
@return: A deferred whose callback is invoked if login is successful
and whose errback is invoked otherwise.
"""
d = maybeDeferred(self.getCapabilities)
d.addCallback(self.__cbLoginCaps, username, password)
return d
    def serverGreeting(self, caps):
        """Called when the server has sent us a greeting.

        Override in a subclass to react to the greeting; the default
        implementation does nothing.

        @type caps: C{dict}
        @param caps: Capabilities the server advertised in its greeting.
        """
def _getContextFactory(self):
if self.context is not None:
return self.context
try:
from twisted.internet import ssl
except ImportError:
return None
else:
context = ssl.ClientContextFactory()
context.method = ssl.SSL.TLSv1_METHOD
return context
    def __cbLoginCaps(self, capabilities, username, password):
        """
        Decide whether to upgrade to TLS before sending LOGIN, based on
        server capabilities and transport support, then issue the command.
        """
        # If the server advertises STARTTLS, we might want to try to switch to TLS
        tryTLS = 'STARTTLS' in capabilities

        # If our transport supports switching to TLS, we might want to try to switch to TLS.
        tlsableTransport = interfaces.ITLSTransport(self.transport, None) is not None

        # If our transport is not already using TLS, we might want to try to switch to TLS.
        nontlsTransport = interfaces.ISSLTransport(self.transport, None) is None

        if not self.startedTLS and tryTLS and tlsableTransport and nontlsTransport:
            d = self.startTLS()

            d.addCallbacks(
                self.__cbLoginTLS,
                self.__ebLoginTLS,
                callbackArgs=(username, password),
                )
            return d
        else:
            if nontlsTransport:
                log.msg("Server has no TLS support. logging in over cleartext!")
            args = ' '.join((_quote(username), _quote(password)))
            return self.sendCommand(Command('LOGIN', args))
    def _startedTLS(self, result, context):
        # STARTTLS was accepted: wrap the transport with the TLS context,
        # invalidate the capability cache (capabilities may change once TLS
        # is active), and record that TLS is now in effect.
        self.transport.startTLS(context)
        self._capCache = None
        self.startedTLS = True
        return result
def __cbLoginTLS(self, result, username, password):
args = ' '.join((_quote(username), _quote(password)))
return self.sendCommand(Command('LOGIN', args))
    def __ebLoginTLS(self, failure):
        # Log the failed TLS negotiation and let the failure propagate to
        # the caller's errback chain.
        log.err(failure)
        return failure
def namespace(self):
"""Retrieve information about the namespaces available to this account
This command is allowed in the Authenticated and Selected states.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with namespace
information. An example of this information is::
[[['', '/']], [], []]
which indicates a single personal namespace called '' with '/'
as its hierarchical delimiter, and no shared or user namespaces.
"""
cmd = 'NAMESPACE'
resp = ('NAMESPACE',)
d = self.sendCommand(Command(cmd, wantResponse=resp))
d.addCallback(self.__cbNamespace)
return d
    def __cbNamespace(self, (lines, last)):
        # Python 2 tuple-unpacking parameter.  Find the NAMESPACE response
        # line and normalize NIL entries to empty lists; fall back to three
        # empty namespaces if the server never answered.
        for parts in lines:
            if len(parts) == 4 and parts[0] == 'NAMESPACE':
                return [e or [] for e in parts[1:]]
        log.err("No NAMESPACE response to NAMESPACE command")
        return [[], [], []]
def select(self, mailbox):
"""
Select a mailbox
This command is allowed in the Authenticated and Selected states.
@type mailbox: C{str}
@param mailbox: The name of the mailbox to select
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with mailbox
information if the select is successful and whose errback is
invoked otherwise. Mailbox information consists of a dictionary
with the following keys and values::
FLAGS: A list of strings containing the flags settable on
messages in this mailbox.
EXISTS: An integer indicating the number of messages in this
mailbox.
RECENT: An integer indicating the number of "recent"
messages in this mailbox.
UNSEEN: The message sequence number (an integer) of the
first unseen message in the mailbox.
PERMANENTFLAGS: A list of strings containing the flags that
can be permanently set on messages in this mailbox.
UIDVALIDITY: An integer uniquely identifying this mailbox.
"""
cmd = 'SELECT'
args = _prepareMailboxName(mailbox)
resp = ('FLAGS', 'EXISTS', 'RECENT', 'UNSEEN', 'PERMANENTFLAGS', 'UIDVALIDITY')
d = self.sendCommand(Command(cmd, args, wantResponse=resp))
d.addCallback(self.__cbSelect, 1)
return d
def examine(self, mailbox):
"""Select a mailbox in read-only mode
This command is allowed in the Authenticated and Selected states.
@type mailbox: C{str}
@param mailbox: The name of the mailbox to examine
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with mailbox
information if the examine is successful and whose errback
is invoked otherwise. Mailbox information consists of a dictionary
with the following keys and values::
'FLAGS': A list of strings containing the flags settable on
messages in this mailbox.
'EXISTS': An integer indicating the number of messages in this
mailbox.
'RECENT': An integer indicating the number of \"recent\"
messages in this mailbox.
'UNSEEN': An integer indicating the number of messages not
flagged \\Seen in this mailbox.
'PERMANENTFLAGS': A list of strings containing the flags that
can be permanently set on messages in this mailbox.
'UIDVALIDITY': An integer uniquely identifying this mailbox.
"""
cmd = 'EXAMINE'
args = _prepareMailboxName(mailbox)
resp = ('FLAGS', 'EXISTS', 'RECENT', 'UNSEEN', 'PERMANENTFLAGS', 'UIDVALIDITY')
d = self.sendCommand(Command(cmd, args, wantResponse=resp))
d.addCallback(self.__cbSelect, 0)
return d
    def _intOrRaise(self, value, phrase):
        """
        Parse C{value} as an integer and return the result or raise
        L{IllegalServerResponse} with C{phrase} as an argument if C{value}
        cannot be parsed as an integer.

        @param value: A string expected to contain a decimal integer.
        @param phrase: The server response fragment to report on failure.
        """
        try:
            return int(value)
        except ValueError:
            raise IllegalServerResponse(phrase)
def __cbSelect(self, (lines, tagline), rw):
"""
Handle lines received in response to a SELECT or EXAMINE command.
See RFC 3501, section 6.3.1.
"""
# In the absence of specification, we are free to assume:
# READ-WRITE access
datum = {'READ-WRITE': rw}
lines.append(parseNestedParens(tagline))
for split in lines:
if len(split) > 0 and split[0].upper() == 'OK':
# Handle all the kinds of OK response.
content = split[1]
key = content[0].upper()
if key == 'READ-ONLY':
datum['READ-WRITE'] = False
elif key == 'READ-WRITE':
datum['READ-WRITE'] = True
elif key == 'UIDVALIDITY':
datum['UIDVALIDITY'] = self._intOrRaise(
content[1], split)
elif key == 'UNSEEN':
datum['UNSEEN'] = self._intOrRaise(content[1], split)
elif key == 'UIDNEXT':
datum['UIDNEXT'] = self._intOrRaise(content[1], split)
elif key == 'PERMANENTFLAGS':
datum['PERMANENTFLAGS'] = tuple(content[1])
else:
log.err('Unhandled SELECT response (2): %s' % (split,))
elif len(split) == 2:
# Handle FLAGS, EXISTS, and RECENT
if split[0].upper() == 'FLAGS':
datum['FLAGS'] = tuple(split[1])
elif isinstance(split[1], str):
# Must make sure things are strings before treating them as
# strings since some other forms of response have nesting in
# places which results in lists instead.
if split[1].upper() == 'EXISTS':
datum['EXISTS'] = self._intOrRaise(split[0], split)
elif split[1].upper() == 'RECENT':
datum['RECENT'] = self._intOrRaise(split[0], split)
else:
log.err('Unhandled SELECT response (0): %s' % (split,))
else:
log.err('Unhandled SELECT response (1): %s' % (split,))
else:
log.err('Unhandled SELECT response (4): %s' % (split,))
return datum
def create(self, name):
"""Create a new mailbox on the server
This command is allowed in the Authenticated and Selected states.
@type name: C{str}
@param name: The name of the mailbox to create.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked if the mailbox creation
is successful and whose errback is invoked otherwise.
"""
return self.sendCommand(Command('CREATE', _prepareMailboxName(name)))
    def delete(self, name):
        """Delete a mailbox

        This command is allowed in the Authenticated and Selected states.

        @type name: C{str}
        @param name: The name of the mailbox to delete.

        @rtype: C{Deferred}
        @return: A deferred whose callback is invoked if the mailbox is
        deleted successfully and whose errback is invoked otherwise.
        """
        return self.sendCommand(Command('DELETE', _prepareMailboxName(name)))
def rename(self, oldname, newname):
"""Rename a mailbox
This command is allowed in the Authenticated and Selected states.
@type oldname: C{str}
@param oldname: The current name of the mailbox to rename.
@type newname: C{str}
@param newname: The new name to give the mailbox.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked if the rename is
successful and whose errback is invoked otherwise.
"""
oldname = _prepareMailboxName(oldname)
newname = _prepareMailboxName(newname)
return self.sendCommand(Command('RENAME', ' '.join((oldname, newname))))
def subscribe(self, name):
"""Add a mailbox to the subscription list
This command is allowed in the Authenticated and Selected states.
@type name: C{str}
@param name: The mailbox to mark as 'active' or 'subscribed'
@rtype: C{Deferred}
@return: A deferred whose callback is invoked if the subscription
is successful and whose errback is invoked otherwise.
"""
return self.sendCommand(Command('SUBSCRIBE', _prepareMailboxName(name)))
def unsubscribe(self, name):
"""Remove a mailbox from the subscription list
This command is allowed in the Authenticated and Selected states.
@type name: C{str}
@param name: The mailbox to unsubscribe
@rtype: C{Deferred}
@return: A deferred whose callback is invoked if the unsubscription
is successful and whose errback is invoked otherwise.
"""
return self.sendCommand(Command('UNSUBSCRIBE', _prepareMailboxName(name)))
def list(self, reference, wildcard):
"""List a subset of the available mailboxes
This command is allowed in the Authenticated and Selected states.
@type reference: C{str}
@param reference: The context in which to interpret C{wildcard}
@type wildcard: C{str}
@param wildcard: The pattern of mailbox names to match, optionally
including either or both of the '*' and '%' wildcards. '*' will
match zero or more characters and cross hierarchical boundaries.
'%' will also match zero or more characters, but is limited to a
single hierarchical level.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a list of C{tuple}s,
the first element of which is a C{tuple} of mailbox flags, the second
element of which is the hierarchy delimiter for this mailbox, and the
third of which is the mailbox name; if the command is unsuccessful,
the deferred's errback is invoked instead.
"""
cmd = 'LIST'
args = '"%s" "%s"' % (reference, wildcard.encode('imap4-utf-7'))
resp = ('LIST',)
d = self.sendCommand(Command(cmd, args, wantResponse=resp))
d.addCallback(self.__cbList, 'LIST')
return d
def lsub(self, reference, wildcard):
"""List a subset of the subscribed available mailboxes
This command is allowed in the Authenticated and Selected states.
The parameters and returned object are the same as for the C{list}
method, with one slight difference: Only mailboxes which have been
subscribed can be included in the resulting list.
"""
cmd = 'LSUB'
args = '"%s" "%s"' % (reference, wildcard.encode('imap4-utf-7'))
resp = ('LSUB',)
d = self.sendCommand(Command(cmd, args, wantResponse=resp))
d.addCallback(self.__cbList, 'LSUB')
return d
def __cbList(self, (lines, last), command):
results = []
for parts in lines:
if len(parts) == 4 and parts[0] == command:
parts[1] = tuple(parts[1])
results.append(tuple(parts[1:]))
return results
def status(self, mailbox, *names):
"""
Retrieve the status of the given mailbox
This command is allowed in the Authenticated and Selected states.
@type mailbox: C{str}
@param mailbox: The name of the mailbox to query
@type *names: C{str}
@param *names: The status names to query. These may be any number of:
C{'MESSAGES'}, C{'RECENT'}, C{'UIDNEXT'}, C{'UIDVALIDITY'}, and
C{'UNSEEN'}.
@rtype: C{Deferred}
@return: A deferred which fires with the status information if the
command is successful and whose errback is invoked otherwise. The
status information is in the form of a C{dict}. Each element of
C{names} is a key in the dictionary. The value for each key is the
corresponding response from the server.
"""
cmd = 'STATUS'
args = "%s (%s)" % (_prepareMailboxName(mailbox), ' '.join(names))
resp = ('STATUS',)
d = self.sendCommand(Command(cmd, args, wantResponse=resp))
d.addCallback(self.__cbStatus)
return d
def __cbStatus(self, (lines, last)):
status = {}
for parts in lines:
if parts[0] == 'STATUS':
items = parts[2]
items = [items[i:i+2] for i in range(0, len(items), 2)]
status.update(dict(items))
for k in status.keys():
t = self.STATUS_TRANSFORMATIONS.get(k)
if t:
try:
status[k] = t(status[k])
except Exception, e:
raise IllegalServerResponse('(%s %s): %s' % (k, status[k], str(e)))
return status
def append(self, mailbox, message, flags = (), date = None):
"""Add the given message to the given mailbox.
This command is allowed in the Authenticated and Selected states.
@type mailbox: C{str}
@param mailbox: The mailbox to which to add this message.
@type message: Any file-like object
@param message: The message to add, in RFC822 format. Newlines
in this file should be \\r\\n-style.
@type flags: Any iterable of C{str}
@param flags: The flags to associated with this message.
@type date: C{str}
@param date: The date to associate with this message. This should
be of the format DD-MM-YYYY HH:MM:SS +/-HHMM. For example, in
Eastern Standard Time, on July 1st 2004 at half past 1 PM,
\"01-07-2004 13:30:00 -0500\".
@rtype: C{Deferred}
@return: A deferred whose callback is invoked when this command
succeeds or whose errback is invoked if it fails.
"""
message.seek(0, 2)
L = message.tell()
message.seek(0, 0)
fmt = '%s (%s)%s {%d}'
if date:
date = ' "%s"' % date
else:
date = ''
cmd = fmt % (
_prepareMailboxName(mailbox), ' '.join(flags),
date, L
)
d = self.sendCommand(Command('APPEND', cmd, (), self.__cbContinueAppend, message))
return d
def __cbContinueAppend(self, lines, message):
s = basic.FileSender()
return s.beginFileTransfer(message, self.transport, None
).addCallback(self.__cbFinishAppend)
    def __cbFinishAppend(self, foo):
        # The APPEND literal must be followed by an empty line to complete
        # the command.
        self.sendLine('')
def check(self):
"""Tell the server to perform a checkpoint
This command is allowed in the Selected state.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked when this command
succeeds or whose errback is invoked if it fails.
"""
return self.sendCommand(Command('CHECK'))
def close(self):
"""Return the connection to the Authenticated state.
This command is allowed in the Selected state.
Issuing this command will also remove all messages flagged \\Deleted
from the selected mailbox if it is opened in read-write mode,
otherwise it indicates success by no messages are removed.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked when the command
completes successfully or whose errback is invoked if it fails.
"""
return self.sendCommand(Command('CLOSE'))
def expunge(self):
"""Return the connection to the Authenticate state.
This command is allowed in the Selected state.
Issuing this command will perform the same actions as issuing the
close command, but will also generate an 'expunge' response for
every message deleted.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a list of the
'expunge' responses when this command is successful or whose errback
is invoked otherwise.
"""
cmd = 'EXPUNGE'
resp = ('EXPUNGE',)
d = self.sendCommand(Command(cmd, wantResponse=resp))
d.addCallback(self.__cbExpunge)
return d
def __cbExpunge(self, (lines, last)):
ids = []
for parts in lines:
if len(parts) == 2 and parts[1] == 'EXPUNGE':
ids.append(self._intOrRaise(parts[0], parts))
return ids
def search(self, *queries, **kwarg):
"""Search messages in the currently selected mailbox
This command is allowed in the Selected state.
Any non-zero number of queries are accepted by this method, as
returned by the C{Query}, C{Or}, and C{Not} functions.
One keyword argument is accepted: if uid is passed in with a non-zero
value, the server is asked to return message UIDs instead of message
sequence numbers.
@rtype: C{Deferred}
@return: A deferred whose callback will be invoked with a list of all
the message sequence numbers return by the search, or whose errback
will be invoked if there is an error.
"""
if kwarg.get('uid'):
cmd = 'UID SEARCH'
else:
cmd = 'SEARCH'
args = ' '.join(queries)
d = self.sendCommand(Command(cmd, args, wantResponse=(cmd,)))
d.addCallback(self.__cbSearch)
return d
def __cbSearch(self, (lines, end)):
ids = []
for parts in lines:
if len(parts) > 0 and parts[0] == 'SEARCH':
ids.extend([self._intOrRaise(p, parts) for p in parts[1:]])
return ids
def fetchUID(self, messages, uid=0):
"""Retrieve the unique identifier for one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message sequence numbers to unique message identifiers, or whose
errback is invoked if there is an error.
"""
return self._fetch(messages, useUID=uid, uid=1)
def fetchFlags(self, messages, uid=0):
"""Retrieve the flags for one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: The messages for which to retrieve flags.
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to lists of flags, or whose errback is invoked if
there is an error.
"""
return self._fetch(str(messages), useUID=uid, flags=1)
def fetchInternalDate(self, messages, uid=0):
"""Retrieve the internal date associated with one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: The messages for which to retrieve the internal date.
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to date strings, or whose errback is invoked
if there is an error. Date strings take the format of
\"day-month-year time timezone\".
"""
return self._fetch(str(messages), useUID=uid, internaldate=1)
def fetchEnvelope(self, messages, uid=0):
"""Retrieve the envelope data for one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: The messages for which to retrieve envelope data.
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to envelope data, or whose errback is invoked
if there is an error. Envelope data consists of a sequence of the
date, subject, from, sender, reply-to, to, cc, bcc, in-reply-to,
and message-id header fields. The date, subject, in-reply-to, and
message-id fields are strings, while the from, sender, reply-to,
to, cc, and bcc fields contain address data. Address data consists
of a sequence of name, source route, mailbox name, and hostname.
Fields which are not present for a particular address may be C{None}.
"""
return self._fetch(str(messages), useUID=uid, envelope=1)
def fetchBodyStructure(self, messages, uid=0):
"""Retrieve the structure of the body of one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: The messages for which to retrieve body structure
data.
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to body structure data, or whose errback is invoked
if there is an error. Body structure data describes the MIME-IMB
format of a message and consists of a sequence of mime type, mime
subtype, parameters, content id, description, encoding, and size.
The fields following the size field are variable: if the mime
type/subtype is message/rfc822, the contained message's envelope
information, body structure data, and number of lines of text; if
the mime type is text, the number of lines of text. Extension fields
may also be included; if present, they are: the MD5 hash of the body,
body disposition, body language.
"""
return self._fetch(messages, useUID=uid, bodystructure=1)
def fetchSimplifiedBody(self, messages, uid=0):
"""Retrieve the simplified body structure of one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to body data, or whose errback is invoked
if there is an error. The simplified body structure is the same
as the body structure, except that extension fields will never be
present.
"""
return self._fetch(messages, useUID=uid, body=1)
def fetchMessage(self, messages, uid=0):
"""Retrieve one or more entire messages
This command is allowed in the Selected state.
@type messages: L{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: L{Deferred}
@return: A L{Deferred} which will fire with a C{dict} mapping message
sequence numbers to C{dict}s giving message data for the
corresponding message. If C{uid} is true, the inner dictionaries
have a C{'UID'} key mapped to a C{str} giving the UID for the
message. The text of the message is a C{str} associated with the
C{'RFC822'} key in each dictionary.
"""
return self._fetch(messages, useUID=uid, rfc822=1)
def fetchHeaders(self, messages, uid=0):
"""Retrieve headers of one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to dicts of message headers, or whose errback is
invoked if there is an error.
"""
return self._fetch(messages, useUID=uid, rfc822header=1)
def fetchBody(self, messages, uid=0):
"""Retrieve body text of one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to file-like objects containing body text, or whose
errback is invoked if there is an error.
"""
return self._fetch(messages, useUID=uid, rfc822text=1)
def fetchSize(self, messages, uid=0):
"""Retrieve the size, in octets, of one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to sizes, or whose errback is invoked if there is
an error.
"""
return self._fetch(messages, useUID=uid, rfc822size=1)
def fetchFull(self, messages, uid=0):
"""Retrieve several different fields of one or more messages
This command is allowed in the Selected state. This is equivalent
to issuing all of the C{fetchFlags}, C{fetchInternalDate},
C{fetchSize}, C{fetchEnvelope}, and C{fetchSimplifiedBody}
functions.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to dict of the retrieved data values, or whose
errback is invoked if there is an error. They dictionary keys
are "flags", "date", "size", "envelope", and "body".
"""
return self._fetch(
messages, useUID=uid, flags=1, internaldate=1,
rfc822size=1, envelope=1, body=1)
def fetchAll(self, messages, uid=0):
"""Retrieve several different fields of one or more messages
This command is allowed in the Selected state. This is equivalent
to issuing all of the C{fetchFlags}, C{fetchInternalDate},
C{fetchSize}, and C{fetchEnvelope} functions.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to dict of the retrieved data values, or whose
errback is invoked if there is an error. They dictionary keys
are "flags", "date", "size", and "envelope".
"""
return self._fetch(
messages, useUID=uid, flags=1, internaldate=1,
rfc822size=1, envelope=1)
def fetchFast(self, messages, uid=0):
"""Retrieve several different fields of one or more messages
This command is allowed in the Selected state. This is equivalent
to issuing all of the C{fetchFlags}, C{fetchInternalDate}, and
C{fetchSize} functions.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to dict of the retrieved data values, or whose
errback is invoked if there is an error. They dictionary keys are
"flags", "date", and "size".
"""
return self._fetch(
messages, useUID=uid, flags=1, internaldate=1, rfc822size=1)
def _parseFetchPairs(self, fetchResponseList):
"""
Given the result of parsing a single I{FETCH} response, construct a
C{dict} mapping response keys to response values.
@param fetchResponseList: The result of parsing a I{FETCH} response
with L{parseNestedParens} and extracting just the response data
(that is, just the part that comes after C{"FETCH"}). The form
of this input (and therefore the output of this method) is very
disagreeable. A valuable improvement would be to enumerate the
possible keys (representing them as structured objects of some
sort) rather than using strings and tuples of tuples of strings
and so forth. This would allow the keys to be documented more
easily and would allow for a much simpler application-facing API
(one not based on looking up somewhat hard to predict keys in a
dict). Since C{fetchResponseList} notionally represents a
flattened sequence of pairs (identifying keys followed by their
associated values), collapsing such complex elements of this
list as C{["BODY", ["HEADER.FIELDS", ["SUBJECT"]]]} into a
single object would also greatly simplify the implementation of
this method.
@return: A C{dict} of the response data represented by C{pairs}. Keys
in this dictionary are things like C{"RFC822.TEXT"}, C{"FLAGS"}, or
C{("BODY", ("HEADER.FIELDS", ("SUBJECT",)))}. Values are entirely
dependent on the key with which they are associated, but retain the
same structured as produced by L{parseNestedParens}.
"""
values = {}
responseParts = iter(fetchResponseList)
while True:
try:
key = responseParts.next()
except StopIteration:
break
try:
value = responseParts.next()
except StopIteration:
raise IllegalServerResponse(
"Not enough arguments", fetchResponseList)
# The parsed forms of responses like:
#
# BODY[] VALUE
# BODY[TEXT] VALUE
# BODY[HEADER.FIELDS (SUBJECT)] VALUE
# BODY[HEADER.FIELDS (SUBJECT)]<N.M> VALUE
#
# are:
#
# ["BODY", [], VALUE]
# ["BODY", ["TEXT"], VALUE]
# ["BODY", ["HEADER.FIELDS", ["SUBJECT"]], VALUE]
# ["BODY", ["HEADER.FIELDS", ["SUBJECT"]], "<N.M>", VALUE]
#
# Additionally, BODY responses for multipart messages are
# represented as:
#
# ["BODY", VALUE]
#
# with list as the type of VALUE and the type of VALUE[0].
#
# See #6281 for ideas on how this might be improved.
if key not in ("BODY", "BODY.PEEK"):
# Only BODY (and by extension, BODY.PEEK) responses can have
# body sections.
hasSection = False
elif not isinstance(value, list):
# A BODY section is always represented as a list. Any non-list
# is not a BODY section.
hasSection = False
elif len(value) > 2:
# The list representing a BODY section has at most two elements.
hasSection = False
elif value and isinstance(value[0], list):
# A list containing a list represents the body structure of a
# multipart message, instead.
hasSection = False
else:
# Otherwise it must have a BODY section to examine.
hasSection = True
# If it has a BODY section, grab some extra elements and shuffle
# around the shape of the key a little bit.
if hasSection:
if len(value) < 2:
key = (key, tuple(value))
else:
key = (key, (value[0], tuple(value[1])))
try:
value = responseParts.next()
except StopIteration:
raise IllegalServerResponse(
"Not enough arguments", fetchResponseList)
# Handle partial ranges
if value.startswith('<') and value.endswith('>'):
try:
int(value[1:-1])
except ValueError:
# This isn't really a range, it's some content.
pass
else:
key = key + (value,)
try:
value = responseParts.next()
except StopIteration:
raise IllegalServerResponse(
"Not enough arguments", fetchResponseList)
values[key] = value
return values
def _cbFetch(self, (lines, last), requestedParts, structured):
info = {}
for parts in lines:
if len(parts) == 3 and parts[1] == 'FETCH':
id = self._intOrRaise(parts[0], parts)
if id not in info:
info[id] = [parts[2]]
else:
info[id][0].extend(parts[2])
results = {}
for (messageId, values) in info.iteritems():
mapping = self._parseFetchPairs(values[0])
results.setdefault(messageId, {}).update(mapping)
flagChanges = {}
for messageId in results.keys():
values = results[messageId]
for part in values.keys():
if part not in requestedParts and part == 'FLAGS':
flagChanges[messageId] = values['FLAGS']
# Find flags in the result and get rid of them.
for i in range(len(info[messageId][0])):
if info[messageId][0][i] == 'FLAGS':
del info[messageId][0][i:i+2]
break
del values['FLAGS']
if not values:
del results[messageId]
if flagChanges:
self.flagsChanged(flagChanges)
if structured:
return results
else:
return info
def fetchSpecific(self, messages, uid=0, headerType=None,
headerNumber=None, headerArgs=None, peek=None,
offset=None, length=None):
"""Retrieve a specific section of one or more messages
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@type headerType: C{str}
@param headerType: If specified, must be one of HEADER,
HEADER.FIELDS, HEADER.FIELDS.NOT, MIME, or TEXT, and will determine
which part of the message is retrieved. For HEADER.FIELDS and
HEADER.FIELDS.NOT, C{headerArgs} must be a sequence of header names.
For MIME, C{headerNumber} must be specified.
@type headerNumber: C{int} or C{int} sequence
@param headerNumber: The nested rfc822 index specifying the
entity to retrieve. For example, C{1} retrieves the first
entity of the message, and C{(2, 1, 3}) retrieves the 3rd
entity inside the first entity inside the second entity of
the message.
@type headerArgs: A sequence of C{str}
@param headerArgs: If C{headerType} is HEADER.FIELDS, these are the
headers to retrieve. If it is HEADER.FIELDS.NOT, these are the
headers to exclude from retrieval.
@type peek: C{bool}
@param peek: If true, cause the server to not set the \\Seen
flag on this message as a result of this command.
@type offset: C{int}
@param offset: The number of octets at the beginning of the result
to skip.
@type length: C{int}
@param length: The number of octets to retrieve.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a mapping of
message numbers to retrieved data, or whose errback is invoked
if there is an error.
"""
fmt = '%s BODY%s[%s%s%s]%s'
if headerNumber is None:
number = ''
elif isinstance(headerNumber, int):
number = str(headerNumber)
else:
number = '.'.join(map(str, headerNumber))
if headerType is None:
header = ''
elif number:
header = '.' + headerType
else:
header = headerType
if header and headerType not in ('TEXT', 'MIME'):
if headerArgs is not None:
payload = ' (%s)' % ' '.join(headerArgs)
else:
payload = ' ()'
else:
payload = ''
if offset is None:
extra = ''
else:
extra = '<%d.%d>' % (offset, length)
fetch = uid and 'UID FETCH' or 'FETCH'
cmd = fmt % (messages, peek and '.PEEK' or '', number, header, payload, extra)
d = self.sendCommand(Command(fetch, cmd, wantResponse=('FETCH',)))
d.addCallback(self._cbFetch, (), False)
return d
def _fetch(self, messages, useUID=0, **terms):
fetch = useUID and 'UID FETCH' or 'FETCH'
if 'rfc822text' in terms:
del terms['rfc822text']
terms['rfc822.text'] = True
if 'rfc822size' in terms:
del terms['rfc822size']
terms['rfc822.size'] = True
if 'rfc822header' in terms:
del terms['rfc822header']
terms['rfc822.header'] = True
cmd = '%s (%s)' % (messages, ' '.join([s.upper() for s in terms.keys()]))
d = self.sendCommand(Command(fetch, cmd, wantResponse=('FETCH',)))
d.addCallback(self._cbFetch, map(str.upper, terms.keys()), True)
return d
def setFlags(self, messages, flags, silent=1, uid=0):
"""Set the flags for one or more messages.
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type flags: Any iterable of C{str}
@param flags: The flags to set
@type silent: C{bool}
@param silent: If true, cause the server to supress its verbose
response.
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a list of the
server's responses (C{[]} if C{silent} is true) or whose
errback is invoked if there is an error.
"""
return self._store(str(messages), 'FLAGS', silent, flags, uid)
def addFlags(self, messages, flags, silent=1, uid=0):
    """
    Add to the set flags for one or more messages.

    This command is allowed in the Selected state.

    @type messages: C{MessageSet} or C{str}
    @param messages: A message sequence set

    @type flags: Any iterable of C{str}
    @param flags: The flags to set

    @type silent: C{bool}
    @param silent: If true, cause the server to supress its verbose
        response.

    @type uid: C{bool}
    @param uid: Indicates whether the message sequence set is of message
        numbers or of unique message IDs.

    @rtype: C{Deferred}
    @return: A deferred whose callback is invoked with a list of the
        server's responses (C{[]} if C{silent} is true) or whose
        errback is invoked if there is an error.
    """
    # +FLAGS adds the given flags without disturbing the others.
    sequence = str(messages)
    return self._store(sequence, '+FLAGS', silent, flags, uid)
def removeFlags(self, messages, flags, silent=1, uid=0):
    """
    Remove from the set flags for one or more messages.

    This command is allowed in the Selected state.

    @type messages: C{MessageSet} or C{str}
    @param messages: A message sequence set

    @type flags: Any iterable of C{str}
    @param flags: The flags to set

    @type silent: C{bool}
    @param silent: If true, cause the server to supress its verbose
        response.

    @type uid: C{bool}
    @param uid: Indicates whether the message sequence set is of message
        numbers or of unique message IDs.

    @rtype: C{Deferred}
    @return: A deferred whose callback is invoked with a list of the
        server's responses (C{[]} if C{silent} is true) or whose
        errback is invoked if there is an error.
    """
    # -FLAGS clears only the given flags, leaving the others set.
    sequence = str(messages)
    return self._store(sequence, '-FLAGS', silent, flags, uid)
def _store(self, messages, cmd, silent, flags, uid):
    # Send a STORE (or UID STORE) command manipulating message flags.
    # C{cmd} is one of 'FLAGS', '+FLAGS' or '-FLAGS'; appending .SILENT
    # asks the server to omit the untagged FETCH (FLAGS ...) responses.
    if silent:
        cmd = cmd + '.SILENT'
    store = uid and 'UID STORE' or 'STORE'
    args = ' '.join((messages, cmd, '(%s)' % ' '.join(flags)))
    d = self.sendCommand(Command(store, args, wantResponse=('FETCH',)))
    # With .SILENT no FLAGS data is expected back, so _cbFetch collects
    # nothing; otherwise it collects the updated FLAGS items.
    expected = ()
    if not silent:
        expected = ('FLAGS',)
    d.addCallback(self._cbFetch, expected, True)
    return d
def copy(self, messages, mailbox, uid):
    """
    Copy the specified messages to the specified mailbox.

    This command is allowed in the Selected state.

    @type messages: C{str}
    @param messages: A message sequence set

    @type mailbox: C{str}
    @param mailbox: The mailbox to which to copy the messages

    @type uid: C{bool}
    @param uid: If true, the C{messages} refers to message UIDs, rather
        than message sequence numbers.

    @rtype: C{Deferred}
    @return: A deferred whose callback is invoked with a true value
        when the copy is successful, or whose errback is invoked if there
        is an error.
    """
    if uid:
        cmd = 'UID COPY'
    else:
        cmd = 'COPY'
    # The mailbox name must be encoded (modified UTF-7) and quoted if
    # necessary before being put on the wire.
    args = '%s %s' % (messages, _prepareMailboxName(mailbox))
    return self.sendCommand(Command(cmd, args))
#
# IMailboxListener methods
#
def modeChanged(self, writeable):
    """
    Override me: the selected mailbox changed mode.

    @param writeable: A true value if the mailbox is now read-write.
    """
def flagsChanged(self, newFlags):
    """
    Override me: flags of messages in the selected mailbox changed.

    @param newFlags: The new flag information (see L{IMailboxListener}).
    """
def newMessages(self, exists, recent):
    """
    Override me: the message counts of the selected mailbox changed.

    @param exists: The new EXISTS count (see L{IMailboxListener}).
    @param recent: The new RECENT count (see L{IMailboxListener}).
    """
class IllegalIdentifierError(IMAP4Exception): pass
def parseIdList(s, lastMessageId=None):
    """
    Parse a message set search key into a C{MessageSet}.

    @type s: C{str}
    @param s: A string description of a id list, for example "1:3, 4:*"

    @type lastMessageId: C{int}
    @param lastMessageId: The last message sequence id or UID, depending on
        whether we are parsing the list in UID or sequence id context. The
        caller should pass in the correct value.

    @rtype: C{MessageSet}
    @return: A C{MessageSet} that contains the ids defined in the list

    @raise IllegalIdentifierError: Raised for non-numeric, non-positive
        or otherwise malformed identifiers.
    """
    res = MessageSet()
    # The set is a comma-separated sequence of single ids and low:high
    # ranges; either end of a range, or a single id, may be '*'.
    parts = s.split(',')
    for p in parts:
        if ':' in p:
            low, high = p.split(':', 1)
            try:
                if low == '*':
                    low = None
                else:
                    low = long(low)
                if high == '*':
                    high = None
                else:
                    high = long(high)
                if low is high is None:
                    # *:* does not make sense
                    raise IllegalIdentifierError(p)
                # non-positive values are illegal according to RFC 3501
                if ((low is not None and low <= 0) or
                    (high is not None and high <= 0)):
                    raise IllegalIdentifierError(p)
                # star means "highest value of an id in the mailbox"
                high = high or lastMessageId
                low = low or lastMessageId
                # RFC says that 2:4 and 4:2 are equivalent
                if low > high:
                    low, high = high, low
                res.extend((low, high))
            except ValueError:
                # long() failed: an endpoint was not numeric.
                raise IllegalIdentifierError(p)
        else:
            try:
                if p == '*':
                    p = None
                else:
                    p = long(p)
                if p is not None and p <= 0:
                    raise IllegalIdentifierError(p)
            except ValueError:
                raise IllegalIdentifierError(p)
            else:
                # A bare '*' is replaced by the highest id known to the
                # caller.
                res.extend(p or lastMessageId)
    return res
class IllegalQueryError(IMAP4Exception): pass
# RFC 3501 section 6.4.4 search keys that take no argument; Query() emits
# the bare key when the corresponding keyword argument is true.
_SIMPLE_BOOL = (
    'ALL', 'ANSWERED', 'DELETED', 'DRAFT', 'FLAGGED', 'NEW', 'OLD', 'RECENT',
    'SEEN', 'UNANSWERED', 'UNDELETED', 'UNDRAFT', 'UNFLAGGED', 'UNSEEN'
)
# Search keys whose argument is numeric or a message set and therefore
# must not be quoted by Query().
_NO_QUOTES = (
    'LARGER', 'SMALLER', 'UID'
)
def Query(sorted=0, **kwarg):
    """
    Create a query string

    Among the accepted keywords are::

        all         : If set to a true value, search all messages in the
                      current mailbox

        answered    : If set to a true value, search messages flagged with
                      \\Answered

        bcc         : A substring to search the BCC header field for

        before      : Search messages with an internal date before this
                      value.  The given date should be a string in the format
                      of 'DD-Mon-YYYY'.  For example, '03-Mar-2003'.

        body        : A substring to search the body of the messages for

        cc          : A substring to search the CC header field for

        deleted     : If set to a true value, search messages flagged with
                      \\Deleted

        draft       : If set to a true value, search messages flagged with
                      \\Draft

        flagged     : If set to a true value, search messages flagged with
                      \\Flagged

        from        : A substring to search the From header field for

        header      : A two-tuple of a header name and substring to search
                      for in that header

        keyword     : Search for messages with the given keyword set

        larger      : Search for messages larger than this number of octets

        messages    : Search only the given message sequence set.

        new         : If set to a true value, search messages flagged with
                      \\Recent but not \\Seen

        old         : If set to a true value, search messages not flagged with
                      \\Recent

        on          : Search messages with an internal date which is on this
                      date.  The given date should be a string in the format
                      of 'DD-Mon-YYYY'.  For example, '03-Mar-2003'.

        recent      : If set to a true value, search for messages flagged with
                      \\Recent

        seen        : If set to a true value, search for messages flagged with
                      \\Seen

        sentbefore  : Search for messages with an RFC822 'Date' header before
                      this date.  The given date should be a string in the
                      format of 'DD-Mon-YYYY'.  For example, '03-Mar-2003'.

        senton      : Search for messages with an RFC822 'Date' header which
                      is on this date.  The given date should be a string in
                      the format of 'DD-Mon-YYYY'.  For example, '03-Mar-2003'.

        sentsince   : Search for messages with an RFC822 'Date' header which
                      is after this date.  The given date should be a string
                      in the format of 'DD-Mon-YYYY'.  For example,
                      '03-Mar-2003'.

        since       : Search for messages with an internal date that is after
                      this date.  The given date should be a string in the
                      format of 'DD-Mon-YYYY'.  For example, '03-Mar-2003'.

        smaller     : Search for messages smaller than this number of octets

        subject     : A substring to search the 'subject' header for

        text        : A substring to search the entire message for

        to          : A substring to search the 'to' header for

        uid         : Search only the messages in the given message set

        unanswered  : If set to a true value, search for messages not
                      flagged with \\Answered

        undeleted   : If set to a true value, search for messages not
                      flagged with \\Deleted

        undraft     : If set to a true value, search for messages not
                      flagged with \\Draft

        unflagged   : If set to a true value, search for messages not
                      flagged with \\Flagged

        unkeyword   : Search for messages without the given keyword set

        unseen      : If set to a true value, search for messages not
                      flagged with \\Seen

    @type sorted: C{bool}
    @param sorted: If true, the output will be sorted, alphabetically.
        The standard does not require it, but it makes testing this function
        easier.  The default is zero, and this should be acceptable for any
        application.

    @rtype: C{str}
    @return: The formatted query string
    """
    cmd = []
    keys = kwarg.keys()
    if sorted:
        keys.sort()
    for k in keys:
        v = kwarg[k]
        k = k.upper()
        if k in _SIMPLE_BOOL and v:
            # Argument-less search keys: emit the bare key.
            cmd.append(k)
        elif k == 'HEADER':
            # HEADER takes a field name and a quoted substring.
            cmd.extend([k, v[0], '"%s"' % (v[1],)])
        elif k == 'KEYWORD' or k == 'UNKEYWORD':
            # Discard anything that does not fit into an "atom".  Perhaps turn
            # the case where this actually removes bytes from the value into a
            # warning and then an error, eventually. See #6277.
            v = string.translate(v, string.maketrans('', ''), _nonAtomChars)
            cmd.extend([k, v])
        elif k not in _NO_QUOTES:
            # Everything else takes a quoted-string argument.
            cmd.extend([k, '"%s"' % (v,)])
        else:
            # Numeric / message-set arguments must not be quoted.
            cmd.extend([k, '%s' % (v,)])
    if len(cmd) > 1:
        return '(%s)' % ' '.join(cmd)
    else:
        return ' '.join(cmd)
def Or(*args):
    """
    The disjunction of two or more queries.

    The RFC 3501 OR key is binary, so more than two arguments are
    combined by right-nesting: C{Or(a, b, c)} yields C{(OR a (OR b c))}.

    @raise IllegalQueryError: Raised if fewer than two queries are given.
    """
    if len(args) < 2:
        raise IllegalQueryError(*args)
    if len(args) == 2:
        return '(OR %s %s)' % args
    return '(OR %s %s)' % (args[0], Or(*args[1:]))
def Not(query):
    """
    The negation of a query.

    @type query: C{str}
    @param query: The query string to negate.

    @rtype: C{str}
    @return: C{query} wrapped in the RFC 3501 NOT search key.
    """
    return '(NOT %s)' % (query,)
class MismatchedNesting(IMAP4Exception):
    """
    Raised when parenthesized or bracketed groups in parsed protocol data
    do not nest properly.
    """
    pass
class MismatchedQuoting(IMAP4Exception):
    """
    Raised when parsed protocol data contains an unterminated quoted
    string.
    """
    pass
def wildcardToRegexp(wildcard, delim=None):
    """
    Convert an IMAP LIST-style wildcard into a compiled, case-insensitive
    regular expression.

    C{*} matches any sequence of characters.  C{%} does too, except that
    when C{delim} is given it will not match across that hierarchy
    delimiter.
    """
    anything = '(?:.*?)'
    pattern = wildcard.replace('*', anything)
    if delim is None:
        pattern = pattern.replace('%', anything)
    else:
        notDelim = '(?:(?:[^%s])*?)' % re.escape(delim)
        pattern = pattern.replace('%', notDelim)
    return re.compile(pattern, re.I)
def splitQuoted(s):
    """
    Split a string into whitespace delimited tokens

    Tokens that would otherwise be separated but are surrounded by \"
    remain as a single token.  Any token that is not quoted and is
    equal to \"NIL\" is tokenized as C{None}.

    @type s: C{str}
    @param s: The string to be split

    @rtype: C{list} of C{str}
    @return: A list of the resulting tokens

    @raise MismatchedQuoting: Raised if an odd number of quotes are present
    """
    s = s.strip()
    result = []
    word = []
    inQuote = inWord = False
    for i, c in enumerate(s):
        if c == '"':
            if i and s[i-1] == '\\':
                # Escaped quote: drop the backslash collected on the
                # previous iteration and keep the quote literally.
                word.pop()
                word.append('"')
            elif not inQuote:
                inQuote = True
            else:
                # Closing quote: emit the quoted word, even if empty.
                # Quoted tokens are never converted to None.
                inQuote = False
                result.append(''.join(word))
                word = []
        elif not inWord and not inQuote and c not in ('"' + string.whitespace):
            # First character of a new unquoted word.
            inWord = True
            word.append(c)
        elif inWord and not inQuote and c in string.whitespace:
            # Whitespace ends an unquoted word; an unquoted NIL becomes
            # None.
            w = ''.join(word)
            if w == 'NIL':
                result.append(None)
            else:
                result.append(w)
            word = []
            inWord = False
        elif inWord or inQuote:
            word.append(c)
    if inQuote:
        raise MismatchedQuoting(s)
    if inWord:
        # Flush the final unquoted word.
        w = ''.join(word)
        if w == 'NIL':
            result.append(None)
        else:
            result.append(w)
    return result
def splitOn(sequence, predicate, transformers):
    """
    Partition C{sequence} into maximal runs of elements for which
    C{predicate} returns the same value, pass each run through
    C{transformers[value]}, and concatenate the transformed results.

    @param sequence: A non-empty sequence.
    @param predicate: A one-argument callable classifying each element.
    @param transformers: A mapping from predicate results to callables,
        each taking a run (a C{list}) and returning an iterable.
    @return: A C{list} of the concatenated transformer outputs.
    """
    output = []
    currentMode = predicate(sequence[0])
    run = [sequence[0]]
    for element in sequence[1:]:
        elementMode = predicate(element)
        if elementMode == currentMode:
            run.append(element)
        else:
            # Mode changed: flush the finished run and start a new one.
            output.extend(transformers[currentMode](run))
            currentMode = elementMode
            run = [element]
    output.extend(transformers[currentMode](run))
    return output
def collapseStrings(results):
    """
    Turns a list of length-one strings and lists into a list of longer
    strings and lists.  For example,

    ['a', 'b', ['c', 'd']] is returned as ['ab', ['cd']]

    @type results: C{list} of C{str} and C{list}
    @param results: The list to be collapsed

    @rtype: C{list} of C{str} and C{list}
    @return: A new list which is the collapsed form of C{results}
    """
    copy = []
    begun = None
    listsList = [isinstance(s, types.ListType) for s in results]
    # Within a run of scalars, literal fragments arrive as 1-tuples and
    # are joined verbatim, while runs of plain characters are joined and
    # re-tokenized with splitQuoted.
    pred = lambda e: isinstance(e, types.TupleType)
    tran = {
        0: lambda e: splitQuoted(''.join(e)),
        1: lambda e: [''.join([i[0] for i in e])]
    }
    for (i, c, isList) in zip(range(len(results)), results, listsList):
        if isList:
            # Flush any pending scalar run, then recurse into the sublist.
            if begun is not None:
                copy.extend(splitOn(results[begun:i], pred, tran))
                begun = None
            copy.append(collapseStrings(c))
        elif begun is None:
            # Start of a new scalar run.
            begun = i
    if begun is not None:
        # Flush the trailing scalar run.
        copy.extend(splitOn(results[begun:], pred, tran))
    return copy
def parseNestedParens(s, handleLiteral = 1):
    """
    Parse an s-exp-like string into a more useful data structure.

    @type s: C{str}
    @param s: The s-exp-like string to parse

    @rtype: C{list} of C{str} and C{list}
    @return: A list containing the tokens present in the input.

    @raise MismatchedNesting: Raised if the number or placement
        of opening or closing parenthesis is invalid.
    """
    s = s.strip()
    inQuote = 0
    # Each nesting level gets its own list; the bottom of the stack is
    # the top-level result.
    contentStack = [[]]
    try:
        i = 0
        L = len(s)
        while i < L:
            c = s[i]
            if inQuote:
                if c == '\\':
                    # Keep the escape sequence intact, two characters at
                    # once, so the escaped character is not re-examined.
                    contentStack[-1].append(s[i:i+2])
                    i += 2
                    continue
                elif c == '"':
                    inQuote = not inQuote
                contentStack[-1].append(c)
                i += 1
            else:
                if c == '"':
                    contentStack[-1].append(c)
                    inQuote = not inQuote
                    i += 1
                elif handleLiteral and c == '{':
                    # {N}\r\n literal: take the N octets following the
                    # closing brace and CRLF as a single (string,) tuple.
                    end = s.find('}', i)
                    if end == -1:
                        raise ValueError, "Malformed literal"
                    literalSize = int(s[i+1:end])
                    # end + 3 skips the '}' and the CRLF pair.
                    contentStack[-1].append((s[end+3:end+3+literalSize],))
                    i = end + 3 + literalSize
                elif c == '(' or c == '[':
                    contentStack.append([])
                    i += 1
                elif c == ')' or c == ']':
                    contentStack[-2].append(contentStack.pop())
                    i += 1
                else:
                    contentStack[-1].append(c)
                    i += 1
    except IndexError:
        # Popping past the bottom of the stack means an unmatched closer.
        raise MismatchedNesting(s)
    if len(contentStack) != 1:
        raise MismatchedNesting(s)
    return collapseStrings(contentStack[0])
def _quote(s):
return '"%s"' % (s.replace('\\', '\\\\').replace('"', '\\"'),)
def _literal(s):
return '{%d}\r\n%s' % (len(s), s)
class DontQuoteMe:
    """
    Wrapper whose string form is inserted into IMAP output verbatim.

    L{collapseNestedLists} quotes ordinary strings; wrapping a value in
    this class causes C{str(value)} to be emitted without quoting.
    """
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return str(self.value)
_ATOM_SPECIALS = '(){ %*"'
def _needsQuote(s):
if s == '':
return 1
for c in s:
if c < '\x20' or c > '\x7f':
return 1
if c in _ATOM_SPECIALS:
return 1
return 0
def _prepareMailboxName(name):
    # Encode a mailbox name with the modified UTF-7 scheme of RFC 3501
    # section 5.1.3 (the 'imap4-utf-7' codec is registered by this
    # module), then quote the result if it is not a valid bare atom.
    name = name.encode('imap4-utf-7')
    if _needsQuote(name):
        return _quote(name)
    return name
def _needsLiteral(s):
# Change this to "return 1" to wig out stupid clients
return '\n' in s or '\r' in s or len(s) > 1000
def collapseNestedLists(items):
    """
    Turn a nested list structure into an s-exp-like string.

    Strings in C{items} will be sent as literals if they contain CR or LF,
    otherwise they will be quoted.  References to None in C{items} will be
    translated to the atom NIL.  Objects with a 'read' attribute will have
    it called on them with no arguments and the returned string will be
    inserted into the output as a literal.  Integers will be converted to
    strings and inserted into the output unquoted.  Instances of
    C{DontQuoteMe} will be converted to strings and inserted into the output
    unquoted.

    This function used to be much nicer, and only quote things that really
    needed to be quoted (and C{DontQuoteMe} did not exist), however, many
    broken IMAP4 clients were unable to deal with this level of sophistication,
    forcing the current behavior to be adopted for practical reasons.

    @type items: Any iterable

    @rtype: C{str}
    """
    pieces = []
    for i in items:
        if i is None:
            pieces.extend([' ', 'NIL'])
        elif isinstance(i, (DontQuoteMe, int, long)):
            # Emitted verbatim, no quoting.
            pieces.extend([' ', str(i)])
        elif isinstance(i, types.StringTypes):
            if _needsLiteral(i):
                # {N}<CRLF> prefix announces a literal of N octets.
                pieces.extend([' ', '{', str(len(i)), '}', IMAP4Server.delimiter, i])
            else:
                pieces.extend([' ', _quote(i)])
        elif hasattr(i, 'read'):
            # File-like objects are drained and always sent as literals.
            d = i.read()
            pieces.extend([' ', '{', str(len(d)), '}', IMAP4Server.delimiter, d])
        else:
            # Anything else is treated as a nested list and parenthesized.
            pieces.extend([' ', '(%s)' % (collapseNestedLists(i),)])
    # Drop the separator contributed by the first element.
    return ''.join(pieces[1:])
class IClientAuthentication(Interface):
    def getName():
        """
        Return an identifier associated with this authentication scheme.

        @rtype: C{str}
        """

    def challengeResponse(secret, challenge):
        """
        Generate a challenge response string.

        @param secret: The secret (e.g. password) for the user being
            authenticated.
        @param challenge: The challenge issued by the server.
        """
class CramMD5ClientAuthenticator:
    # Client-side CRAM-MD5 (RFC 2195) SASL responder.
    implements(IClientAuthentication)

    def __init__(self, user):
        self.user = user

    def getName(self):
        return "CRAM-MD5"

    def challengeResponse(self, secret, chal):
        # hmac.HMAC with no digestmod uses MD5 (the Python 2 default),
        # which is exactly what CRAM-MD5 requires.
        response = hmac.HMAC(secret, chal).hexdigest()
        return '%s %s' % (self.user, response)
class LOGINAuthenticator:
    # Client-side SASL LOGIN responder: answers the username prompt
    # first, then the password prompt.
    implements(IClientAuthentication)

    def __init__(self, user):
        self.user = user
        # challengeResponse is rebound after each step so successive
        # server challenges receive the username, then the secret.
        self.challengeResponse = self.challengeUsername

    def getName(self):
        return "LOGIN"

    def challengeUsername(self, secret, chal):
        # Respond to something like "Username:"
        self.challengeResponse = self.challengeSecret
        return self.user

    def challengeSecret(self, secret, chal):
        # Respond to something like "Password:"
        return secret
class PLAINAuthenticator:
    # Client-side SASL PLAIN responder.
    implements(IClientAuthentication)

    def __init__(self, user):
        self.user = user

    def getName(self):
        return "PLAIN"

    def challengeResponse(self, secret, chal):
        # NUL-separated: empty authorization identity, then the
        # authentication identity and the password.
        return '\0%s\0%s' % (self.user, secret)
class MailboxException(IMAP4Exception): pass
class MailboxCollision(MailboxException):
    """
    Raised when creating or renaming a mailbox would clobber an existing
    one.
    """
    def __str__(self):
        return 'Mailbox named %s already exists' % self.args
class NoSuchMailbox(MailboxException):
    """
    Raised when an operation refers to a mailbox that does not exist.
    """
    def __str__(self):
        return 'No mailbox named %s exists' % self.args
class ReadOnlyMailbox(MailboxException):
    """
    Raised when a mutating operation is attempted on a mailbox that was
    opened read-only.
    """
    def __str__(self):
        return 'Mailbox open in read-only state'
class IAccount(Interface):
    """
    Interface for Account classes

    Implementors of this interface should consider implementing
    C{INamespacePresenter}.

    Note that every method here may return its result either directly or
    via a C{Deferred}.
    """

    def addMailbox(name, mbox = None):
        """
        Add a new mailbox to this account

        @type name: C{str}
        @param name: The name associated with this mailbox.  It may not
            contain multiple hierarchical parts.

        @type mbox: An object implementing C{IMailbox}
        @param mbox: The mailbox to associate with this name.  If C{None},
            a suitable default is created and used.

        @rtype: C{Deferred} or C{bool}
        @return: A true value if the creation succeeds, or a deferred whose
            callback will be invoked when the creation succeeds.

        @raise MailboxException: Raised if this mailbox cannot be added for
            some reason.  This may also be raised asynchronously, if a
            C{Deferred} is returned.
        """

    def create(pathspec):
        """
        Create a new mailbox from the given hierarchical name.

        @type pathspec: C{str}
        @param pathspec: The full hierarchical name of a new mailbox to
            create.  If any of the inferior hierarchical names to this one
            do not exist, they are created as well.

        @rtype: C{Deferred} or C{bool}
        @return: A true value if the creation succeeds, or a deferred whose
            callback will be invoked when the creation succeeds.

        @raise MailboxException: Raised if this mailbox cannot be added.
            This may also be raised asynchronously, if a C{Deferred} is
            returned.
        """

    def select(name, rw=True):
        """
        Acquire a mailbox, given its name.

        @type name: C{str}
        @param name: The mailbox to acquire

        @type rw: C{bool}
        @param rw: If a true value, request a read-write version of this
            mailbox.  If a false value, request a read-only version.

        @rtype: Any object implementing C{IMailbox} or C{Deferred}
        @return: The mailbox object, or a C{Deferred} whose callback will
            be invoked with the mailbox object.  None may be returned if the
            specified mailbox may not be selected for any reason.
        """

    def delete(name):
        """
        Delete the mailbox with the specified name.

        @type name: C{str}
        @param name: The mailbox to delete.

        @rtype: C{Deferred} or C{bool}
        @return: A true value if the mailbox is successfully deleted, or a
            C{Deferred} whose callback will be invoked when the deletion
            completes.

        @raise MailboxException: Raised if this mailbox cannot be deleted.
            This may also be raised asynchronously, if a C{Deferred} is
            returned.
        """

    def rename(oldname, newname):
        """
        Rename a mailbox

        @type oldname: C{str}
        @param oldname: The current name of the mailbox to rename.

        @type newname: C{str}
        @param newname: The new name to associate with the mailbox.

        @rtype: C{Deferred} or C{bool}
        @return: A true value if the mailbox is successfully renamed, or a
            C{Deferred} whose callback will be invoked when the rename
            operation is completed.

        @raise MailboxException: Raised if this mailbox cannot be
            renamed.  This may also be raised asynchronously, if a
            C{Deferred} is returned.
        """

    def isSubscribed(name):
        """
        Check the subscription status of a mailbox

        @type name: C{str}
        @param name: The name of the mailbox to check

        @rtype: C{Deferred} or C{bool}
        @return: A true value if the given mailbox is currently subscribed
            to, a false value otherwise.  A C{Deferred} may also be returned
            whose callback will be invoked with one of these values.
        """

    def subscribe(name):
        """
        Subscribe to a mailbox

        @type name: C{str}
        @param name: The name of the mailbox to subscribe to

        @rtype: C{Deferred} or C{bool}
        @return: A true value if the mailbox is subscribed to successfully,
            or a Deferred whose callback will be invoked with this value
            when the subscription is successful.

        @raise MailboxException: Raised if this mailbox cannot be
            subscribed to.  This may also be raised asynchronously, if a
            C{Deferred} is returned.
        """

    def unsubscribe(name):
        """
        Unsubscribe from a mailbox

        @type name: C{str}
        @param name: The name of the mailbox to unsubscribe from

        @rtype: C{Deferred} or C{bool}
        @return: A true value if the mailbox is unsubscribed from
            successfully, or a Deferred whose callback will be invoked with
            this value when the unsubscription is successful.

        @raise MailboxException: Raised if this mailbox cannot be
            unsubscribed from.  This may also be raised asynchronously, if a
            C{Deferred} is returned.
        """

    def listMailboxes(ref, wildcard):
        """
        List all the mailboxes that meet a certain criteria

        @type ref: C{str}
        @param ref: The context in which to apply the wildcard

        @type wildcard: C{str}
        @param wildcard: An expression against which to match mailbox names.
            '*' matches any number of characters in a mailbox name, and '%'
            matches similarly, but will not match across hierarchical
            boundaries.

        @rtype: C{list} of C{tuple}
        @return: A list of C{(mailboxName, mailboxObject)} which meet the
            given criteria.  C{mailboxObject} should implement either
            C{IMailboxInfo} or C{IMailbox}.  A Deferred may also be returned.
        """
class INamespacePresenter(Interface):
    def getPersonalNamespaces():
        """
        Report the available personal namespaces.

        Typically there should be only one personal namespace.  A common
        name for it is \"\", and its hierarchical delimiter is usually
        \"/\".

        @rtype: iterable of two-tuples of strings
        @return: The personal namespaces and their hierarchical delimiters.
            If no namespaces of this type exist, None should be returned.
        """

    def getSharedNamespaces():
        """
        Report the available shared namespaces.

        Shared namespaces do not belong to any individual user but are
        usually available to one or more of them.  Examples of shared
        namespaces might be \"#news\" for a usenet gateway.

        @rtype: iterable of two-tuples of strings
        @return: The shared namespaces and their hierarchical delimiters.
            If no namespaces of this type exist, None should be returned.
        """

    def getUserNamespaces():
        """
        Report the available user namespaces.

        These are namespaces that contain folders belonging to other users
        access to which this account has been granted.

        @rtype: iterable of two-tuples of strings
        @return: The user namespaces and their hierarchical delimiters.
            If no namespaces of this type exist, None should be returned.
        """
class MemoryAccount(object):
    """
    An in-memory implementation of L{IAccount} and L{INamespacePresenter}.

    Mailbox names are stored uppercased; creation of concrete mailbox
    objects is delegated to the L{_emptyMailbox} hook, which subclasses
    must override.
    """
    implements(IAccount, INamespacePresenter)

    # mailboxes maps uppercased mailbox name -> mailbox object;
    # subscriptions is a list of uppercased subscribed mailbox names.
    mailboxes = None
    subscriptions = None
    # Next id handed out by allocateID.
    top_id = 0

    def __init__(self, name):
        self.name = name
        self.mailboxes = {}
        self.subscriptions = []

    def allocateID(self):
        # Hand out monotonically increasing integer ids.
        id = self.top_id
        self.top_id += 1
        return id

    ##
    ## IAccount
    ##
    def addMailbox(self, name, mbox = None):
        name = name.upper()
        if name in self.mailboxes:
            raise MailboxCollision, name
        if mbox is None:
            mbox = self._emptyMailbox(name, self.allocateID())
        self.mailboxes[name] = mbox
        return 1

    def create(self, pathspec):
        # Create every intermediate hierarchy level, ignoring the ones
        # that already exist.
        paths = filter(None, pathspec.split('/'))
        for accum in range(1, len(paths)):
            try:
                self.addMailbox('/'.join(paths[:accum]))
            except MailboxCollision:
                pass
        try:
            self.addMailbox('/'.join(paths))
        except MailboxCollision:
            # A trailing '/' means only the hierarchy was requested, so a
            # pre-existing leaf is not an error in that case.
            if not pathspec.endswith('/'):
                return False
        return True

    def _emptyMailbox(self, name, id):
        # Hook for subclasses: return a new, empty mailbox object.
        raise NotImplementedError

    def select(self, name, readwrite=1):
        # Returns None for unknown names; readwrite is ignored here.
        return self.mailboxes.get(name.upper())

    def delete(self, name):
        name = name.upper()
        # See if this mailbox exists at all
        mbox = self.mailboxes.get(name)
        if not mbox:
            raise MailboxException("No such mailbox")
        # See if this box is flagged \Noselect
        if r'\Noselect' in mbox.getFlags():
            # Check for hierarchically inferior mailboxes with this one
            # as part of their root.
            for others in self.mailboxes.keys():
                if others != name and others.startswith(name):
                    raise MailboxException, "Hierarchically inferior mailboxes exist and \\Noselect is set"
        mbox.destroy()

        # iff there are no hierarchically inferior names, we will
        # delete it from our ken.
        # NOTE(review): this compares a list to an int, which is always
        # true on Python 2 -- it looks like it was meant to be
        # len(self._inferiorNames(name)) > 1; confirm the intended
        # semantics before changing it.
        if self._inferiorNames(name) > 1:
            del self.mailboxes[name]

    def rename(self, oldname, newname):
        oldname = oldname.upper()
        newname = newname.upper()
        if oldname not in self.mailboxes:
            raise NoSuchMailbox, oldname
        inferiors = self._inferiorNames(oldname)
        inferiors = [(o, o.replace(oldname, newname, 1)) for o in inferiors]
        # Refuse the whole rename if any target name already exists.
        for (old, new) in inferiors:
            if new in self.mailboxes:
                raise MailboxCollision, new
        for (old, new) in inferiors:
            self.mailboxes[new] = self.mailboxes[old]
            del self.mailboxes[old]

    def _inferiorNames(self, name):
        # Return every mailbox name rooted at C{name}, including C{name}
        # itself when it exists.
        inferiors = []
        for infname in self.mailboxes.keys():
            if infname.startswith(name):
                inferiors.append(infname)
        return inferiors

    def isSubscribed(self, name):
        return name.upper() in self.subscriptions

    def subscribe(self, name):
        name = name.upper()
        if name not in self.subscriptions:
            self.subscriptions.append(name)

    def unsubscribe(self, name):
        name = name.upper()
        if name not in self.subscriptions:
            raise MailboxException, "Not currently subscribed to " + name
        self.subscriptions.remove(name)

    def listMailboxes(self, ref, wildcard):
        ref = self._inferiorNames(ref.upper())
        wildcard = wildcardToRegexp(wildcard, '/')
        return [(i, self.mailboxes[i]) for i in ref if wildcard.match(i)]

    ##
    ## INamespacePresenter
    ##
    def getPersonalNamespaces(self):
        return [["", "/"]]

    def getSharedNamespaces(self):
        return None

    # NOTE(review): INamespacePresenter declares getUserNamespaces, but no
    # such method is defined here; confirm whether getOtherNamespaces was
    # meant to be getUserNamespaces.
    def getOtherNamespaces(self):
        return None
# Map STATUS data item names (RFC 3501 section 6.3.10) to the name of the
# mailbox method that computes each one.
_statusRequestDict = {
    'MESSAGES': 'getMessageCount',
    'RECENT': 'getRecentCount',
    'UIDNEXT': 'getUIDNext',
    'UIDVALIDITY': 'getUIDValidity',
    'UNSEEN': 'getUnseenCount'
}
def statusRequestHelper(mbox, names):
    """
    Build a STATUS response dictionary by invoking, for each requested
    data item, the corresponding accessor method on C{mbox}.

    @param mbox: The mailbox object to interrogate.
    @param names: An iterable of STATUS data item names (matched
        case-insensitively); each appears as a key in the result with the
        accessor's return value.
    """
    return dict(
        (name, getattr(mbox, _statusRequestDict[name.upper()])())
        for name in names)
def parseAddr(addr):
    # Parse an RFC 2822 address list into the nested list-of-lists form
    # used by ENVELOPE responses.  Each parsed address becomes
    # [display name or None, None (source route), then the address split
    # on '@'].
    # NOTE(review): when C{addr} is None a single 3-tuple is returned,
    # one field fewer than the parsed 4-element form -- confirm callers
    # rely on this asymmetry.
    if addr is None:
        return [(None, None, None),]
    # email.Utils is the Python 2 spelling of email.utils.
    addr = email.Utils.getaddresses([addr])
    return [[fn or None, None] + address.split('@') for fn, address in addr]
def getEnvelope(msg):
    """
    Build the RFC 3501 ENVELOPE structure for C{msg}.

    @param msg: An object with a C{getHeaders} method (see its use below).
    @return: A ten-tuple of (date, subject, from, sender, reply-to, to,
        cc, bcc, in-reply-to, message-id), with address fields parsed via
        L{parseAddr}.
    """
    headers = msg.getHeaders(True)
    date = headers.get('date')
    subject = headers.get('subject')
    from_ = headers.get('from')
    # Sender and reply-to default to the From value when absent.
    sender = headers.get('sender', from_)
    reply_to = headers.get('reply-to', from_)
    to = headers.get('to')
    cc = headers.get('cc')
    bcc = headers.get('bcc')
    in_reply_to = headers.get('in-reply-to')
    mid = headers.get('message-id')
    # from/sender are always run through parseAddr (it accepts None);
    # the remaining address fields stay None when the header is missing.
    return (date, subject, parseAddr(from_), parseAddr(sender),
            reply_to and parseAddr(reply_to), to and parseAddr(to),
            cc and parseAddr(cc), bcc and parseAddr(bcc), in_reply_to, mid)
def getLineCount(msg):
    """
    Count the lines in the body of C{msg} by iterating its body file.

    @param msg: An object with a C{getBodyFile} method returning an
        iterable of lines.
    @rtype: C{int}
    """
    # XXX - Super expensive, CACHE THIS VALUE FOR LATER RE-USE
    # XXX - This must be the number of lines in the ENCODED version
    return sum(1 for _ in msg.getBodyFile())
def unquote(s):
    """
    Strip one pair of surrounding double quotes from C{s}, if present.

    @type s: C{str}
    @param s: A possibly-quoted string.

    @rtype: C{str}
    @return: C{s} without its surrounding quotes, or C{s} unchanged if it
        is not a quoted string.
    """
    # The length guard keeps the empty string (which previously raised
    # IndexError on s[0]) and a lone '"' character passing through
    # unchanged.
    if len(s) >= 2 and s[0] == s[-1] == '"':
        return s[1:-1]
    return s
def _getContentType(msg):
"""
Return a two-tuple of the main and subtype of the given message.
"""
attrs = None
mm = msg.getHeaders(False, 'content-type').get('content-type', None)
if mm:
mm = ''.join(mm.splitlines())
mimetype = mm.split(';')
if mimetype:
type = mimetype[0].split('/', 1)
if len(type) == 1:
major = type[0]
minor = None
elif len(type) == 2:
major, minor = type
else:
major = minor = None
attrs = dict(x.strip().lower().split('=', 1) for x in mimetype[1:])
else:
major = minor = None
else:
major = minor = None
return major, minor, attrs
def _getMessageStructure(message):
    """
    Construct an appropriate type of message structure object for the given
    message object.

    @param message: A L{IMessagePart} provider

    @return: A L{_MessageStructure} instance of the most specific type
        available for the given message, determined by inspecting the MIME
        type of the message.
    """
    main, subtype, attrs = _getContentType(message)
    # Normalize case before dispatching on the MIME type.
    if main is not None:
        main = main.lower()
    if subtype is not None:
        subtype = subtype.lower()
    if main == 'multipart':
        return _MultipartMessageStructure(message, subtype, attrs)
    elif (main, subtype) == ('message', 'rfc822'):
        return _RFC822MessageStructure(message, main, subtype, attrs)
    elif main == 'text':
        return _TextMessageStructure(message, main, subtype, attrs)
    else:
        # Fallback for every other single-part type.
        return _SinglepartMessageStructure(message, main, subtype, attrs)
class _MessageStructure(object):
    """
    L{_MessageStructure} is a helper base class for message structure classes
    representing the structure of particular kinds of messages, as defined by
    their MIME type.
    """
    def __init__(self, message, attrs):
        """
        @param message: An L{IMessagePart} provider which this structure
            object reports on.

        @param attrs: A C{dict} giving the parameters of the I{Content-Type}
            header of the message.
        """
        self.message = message
        self.attrs = attrs

    def _disposition(self, disp):
        """
        Parse a I{Content-Disposition} header into a two-sequence of the
        disposition and a flattened list of its parameters.

        @return: C{None} if there is no disposition header value, a C{list}
            with two elements otherwise.
        """
        if disp:
            disp = disp.split('; ')
            if len(disp) == 1:
                disp = (disp[0].lower(), None)
            elif len(disp) > 1:
                # XXX Poorly tested parser
                # Parameters are flattened into [name, value, name, value,
                # ...] form; values keep any surrounding quotes here.
                params = [x for param in disp[1:] for x in param.split('=', 1)]
                disp = [disp[0].lower(), params]
            return disp
        else:
            return None

    def _unquotedAttrs(self):
        """
        @return: The I{Content-Type} parameters, unquoted, as a flat list
            with each Nth element giving a parameter name and N+1th element
            giving the corresponding parameter value.
        """
        if self.attrs:
            unquoted = [(k, unquote(v)) for (k, v) in self.attrs.iteritems()]
            # Sorted for deterministic output.
            return [y for x in sorted(unquoted) for y in x]
        return None
class _SinglepartMessageStructure(_MessageStructure):
    """
    L{_SinglepartMessageStructure} represents the message structure of a
    non-I{multipart/*} message.
    """
    # Headers contributing the basic fields of a BODY/BODYSTRUCTURE reply.
    _HEADERS = [
        'content-id', 'content-description',
        'content-transfer-encoding']

    def __init__(self, message, main, subtype, attrs):
        """
        @param message: An L{IMessagePart} provider which this structure
            object reports on.

        @param main: A C{str} giving the main MIME type of the message (for
            example, C{"text"}).

        @param subtype: A C{str} giving the MIME subtype of the message (for
            example, C{"plain"}).

        @param attrs: A C{dict} giving the parameters of the I{Content-Type}
            header of the message.
        """
        _MessageStructure.__init__(self, message, attrs)
        self.main = main
        self.subtype = subtype
        self.attrs = attrs

    def _basicFields(self):
        """
        Return a list of the basic fields for a single-part message.
        """
        headers = self.message.getHeaders(False, *self._HEADERS)

        # Number of octets total
        size = self.message.getSize()

        major, minor = self.main, self.subtype

        # content-type parameter list
        unquotedAttrs = self._unquotedAttrs()

        return [
            major, minor, unquotedAttrs,
            headers.get('content-id'),
            headers.get('content-description'),
            headers.get('content-transfer-encoding'),
            size,
            ]

    def encode(self, extended):
        """
        Construct and return a list of the basic and extended fields for a
        single-part message.  The list is suitable to be encoded into a BODY
        or BODYSTRUCTURE response.
        """
        result = self._basicFields()
        if extended:
            result.extend(self._extended())
        return result

    def _extended(self):
        """
        The extension data of a non-multipart body part are in the
        following order:

          1. body MD5

             A string giving the body MD5 value as defined in [MD5].

          2. body disposition

             A parenthesized list with the same content and function as
             the body disposition for a multipart body part.

          3. body language

             A string or parenthesized list giving the body language
             value as defined in [LANGUAGE-TAGS].

          4. body location

             A string list giving the body content URI as defined in
             [LOCATION].
        """
        result = []
        # Fix: 'content-location' used to be requested as a duplicated
        # 'content-language', so the body-location extension field below
        # was always None.
        headers = self.message.getHeaders(
            False, 'content-md5', 'content-disposition',
            'content-language', 'content-location')

        result.append(headers.get('content-md5'))
        result.append(self._disposition(headers.get('content-disposition')))
        result.append(headers.get('content-language'))
        result.append(headers.get('content-location'))

        return result
class _TextMessageStructure(_SinglepartMessageStructure):
    """
    L{_TextMessageStructure} represents the message structure of a I{text/*}
    message.
    """
    def encode(self, extended):
        """
        A body type of type TEXT contains, immediately after the basic
        fields, the size of the body in text lines.  Note that this
        size is the size in its content transfer encoding and not the
        resulting size after any decoding.
        """
        result = _SinglepartMessageStructure._basicFields(self)
        # The line count is inserted before any extension fields.
        result.append(getLineCount(self.message))
        if extended:
            result.extend(self._extended())
        return result
class _RFC822MessageStructure(_SinglepartMessageStructure):
    """
    L{_RFC822MessageStructure} represents the message structure of a
    I{message/rfc822} message.
    """
    def encode(self, extended):
        """
        A body type of type MESSAGE and subtype RFC822 contains,
        immediately after the basic fields, the envelope structure,
        body structure, and size in text lines of the encapsulated
        message.
        """
        # NOTE(review): encode(extended) may already have appended the
        # extension fields, so the envelope/body-structure/line-count end
        # up after them rather than "immediately after the basic fields"
        # as the docstring (and RFC 3501) describe -- confirm the intended
        # field order before relying on it.
        result = _SinglepartMessageStructure.encode(self, extended)
        # The encapsulated message is the sole sub-part.
        contained = self.message.getSubPart(0)
        result.append(getEnvelope(contained))
        result.append(getBodyStructure(contained, False))
        result.append(getLineCount(contained))
        return result
class _MultipartMessageStructure(_MessageStructure):
    """
    L{_MultipartMessageStructure} represents the message structure of a
    I{multipart/*} message.
    """
    def __init__(self, message, subtype, attrs):
        """
        @param message: An L{IMessagePart} provider which this structure object
            reports on.
        @param subtype: A C{str} giving the MIME subtype of the message (for
            example, C{"plain"}).
        @param attrs: A C{dict} giving the parameters of the I{Content-Type}
            header of the message.
        """
        _MessageStructure.__init__(self, message, attrs)
        self.subtype = subtype
    def _getParts(self):
        """
        Return an iterator over all of the sub-messages of this message.
        """
        counter = 0
        while True:
            try:
                subPart = self.message.getSubPart(counter)
            except IndexError:
                return
            yield subPart
            counter += 1
    def encode(self, extended):
        """
        Encode each sub-message and add the additional I{multipart} fields.
        """
        encoded = []
        for subPart in self._getParts():
            encoded.append(_getMessageStructure(subPart).encode(extended))
        encoded.append(self.subtype)
        if extended:
            encoded.extend(self._extended())
        return encoded
    def _extended(self):
        """
        The extension data of a multipart body part are in the following order:
          1. body parameter parenthesized list: attribute/value pairs as
             defined in [MIME-IMB].
          2. body disposition: a disposition type string followed by a
             parenthesized list of disposition attribute/value pairs as
             defined in [DISPOSITION].
          3. body language: a string or parenthesized list giving the body
             language value as defined in [LANGUAGE-TAGS].
          4. body location: a string giving the body content URI as defined
             in [LOCATION].
        """
        headers = self.message.getHeaders(
            False, 'content-language', 'content-location',
            'content-disposition')
        return [
            self._unquotedAttrs(),
            self._disposition(headers.get('content-disposition')),
            headers.get('content-language', None),
            headers.get('content-location', None),
        ]
def getBodyStructure(msg, extended=False):
    """
    RFC 3501, 7.4.2, BODYSTRUCTURE::
      A parenthesized list that describes the [MIME-IMB] body structure of a
      message. This is computed by the server by parsing the [MIME-IMB] header
      fields, defaulting various fields as necessary.
      For example, a simple text message of 48 lines and 2279 octets can have
      a body structure of: ("TEXT" "PLAIN" ("CHARSET" "US-ASCII") NIL NIL
      "7BIT" 2279 48)
    This is represented as::
        ["TEXT", "PLAIN", ["CHARSET", "US-ASCII"], None, None, "7BIT", 2279, 48]
    These basic fields are documented in the RFC as:
      1. body type
         A string giving the content media type name as defined in
         [MIME-IMB].
      2. body subtype
         A string giving the content subtype name as defined in
         [MIME-IMB].
      3. body parameter parenthesized list
         A parenthesized list of attribute/value pairs [e.g., ("foo"
         "bar" "baz" "rag") where "bar" is the value of "foo" and
         "rag" is the value of "baz"] as defined in [MIME-IMB].
      4. body id
         A string giving the content id as defined in [MIME-IMB].
      5. body description
         A string giving the content description as defined in
         [MIME-IMB].
      6. body encoding
         A string giving the content transfer encoding as defined in
         [MIME-IMB].
      7. body size
         A number giving the size of the body in octets. Note that this size is
         the size in its transfer encoding and not the resulting size after any
         decoding.
    Put another way, the body structure is a list of seven elements.  The
    semantics of the elements of this list are:
       1. Byte string giving the major MIME type
       2. Byte string giving the minor MIME type
       3. A list giving the Content-Type parameters of the message
       4. A byte string giving the content identifier for the message part, or
          None if it has no content identifier.
       5. A byte string giving the content description for the message part, or
          None if it has no content description.
       6. A byte string giving the Content-Encoding of the message body
       7. An integer giving the number of octets in the message body
    The RFC goes on::
        Multiple parts are indicated by parenthesis nesting.  Instead of a body
        type as the first element of the parenthesized list, there is a sequence
        of one or more nested body structures.  The second element of the
        parenthesized list is the multipart subtype (mixed, digest, parallel,
        alternative, etc.).
        For example, a two part message consisting of a text and a
        BASE64-encoded text attachment can have a body structure of: (("TEXT"
        "PLAIN" ("CHARSET" "US-ASCII") NIL NIL "7BIT" 1152 23)("TEXT" "PLAIN"
        ("CHARSET" "US-ASCII" "NAME" "cc.diff")
        "<960723163407.20117h@cac.washington.edu>" "Compiler diff" "BASE64" 4554
        73) "MIXED")
    This is represented as::
        [["TEXT", "PLAIN", ["CHARSET", "US-ASCII"], None, None, "7BIT", 1152,
          23],
         ["TEXT", "PLAIN", ["CHARSET", "US-ASCII", "NAME", "cc.diff"],
          "<960723163407.20117h@cac.washington.edu>", "Compiler diff",
          "BASE64", 4554, 73],
         "MIXED"]
    In other words, a list of N + 1 elements, where N is the number of parts in
    the message.  The first N elements are structures as defined by the previous
    section.  The last element is the minor MIME subtype of the multipart
    message.
    Additionally, the RFC describes extension data::
        Extension data follows the multipart subtype.  Extension data is never
        returned with the BODY fetch, but can be returned with a BODYSTRUCTURE
        fetch.  Extension data, if present, MUST be in the defined order.
    The C{extended} flag controls whether extension data might be returned with
    the normal data.

    @param msg: The message for which to compute the body structure.
    @type msg: An L{IMessagePart} provider

    @param extended: Whether to include extension data in the result.
    @type extended: C{bool}

    @return: A C{list} representing the body structure as described above.
    """
    return _getMessageStructure(msg).encode(extended)
class IMessagePart(Interface):
    """
    A part of a message, exposing its headers, body, size, and any
    sub-parts.
    """
    def getHeaders(negate, *names):
        """Retrieve a group of message headers.
        @type names: C{tuple} of C{str}
        @param names: The names of the headers to retrieve or omit.
        @type negate: C{bool}
        @param negate: If True, indicates that the headers listed in C{names}
        should be omitted from the return value, rather than included.
        @rtype: C{dict}
        @return: A mapping of header field names to header field values
        """
    def getBodyFile():
        """Retrieve a file object containing only the body of this message.
        """
    def getSize():
        """Retrieve the total size, in octets, of this message.
        @rtype: C{int}
        """
    def isMultipart():
        """Indicate whether this message has subparts.
        @rtype: C{bool}
        """
    def getSubPart(part):
        """Retrieve a MIME sub-message
        @type part: C{int}
        @param part: The number of the part to retrieve, indexed from 0.
        @raise IndexError: Raised if the specified part does not exist.
        @raise TypeError: Raised if this message is not multipart.
        @rtype: Any object implementing C{IMessagePart}.
        @return: The specified sub-part.
        """
class IMessage(IMessagePart):
    """
    A message stored in a mailbox.  Extends L{IMessagePart} with the UID,
    flags, and internal date associated with a stored message.
    """
    def getUID():
        """Retrieve the unique identifier associated with this message.
        """
    def getFlags():
        """Retrieve the flags associated with this message.
        @rtype: C{iterable}
        @return: The flags, represented as strings.
        """
    def getInternalDate():
        """Retrieve the date internally associated with this message.
        @rtype: C{str}
        @return: An RFC822-formatted date string.
        """
class IMessageFile(Interface):
    """Optional message interface for representing messages as files.
    If provided by message objects, this interface will be used instead
    the more complex MIME-based interface.
    """
    def open():
        """Return a file-like object opened for reading.
        Reading from the returned file will return all the bytes
        of which this message consists.
        """
class ISearchableMailbox(Interface):
    """
    Optional interface for mailboxes which can evaluate search queries
    themselves.
    """
    def search(query, uid):
        """Search for messages that meet the given query criteria.
        If this interface is not implemented by the mailbox, L{IMailbox.fetch}
        and various methods of L{IMessage} will be used instead.
        Implementations which wish to offer better performance than the
        default implementation should implement this interface.
        @type query: C{list}
        @param query: The search criteria
        @type uid: C{bool}
        @param uid: If true, the IDs specified in the query are UIDs;
        otherwise they are message sequence IDs.
        @rtype: C{list} or C{Deferred}
        @return: A list of message sequence numbers or message UIDs which
        match the search criteria or a C{Deferred} whose callback will be
        invoked with such a list.
        @raise IllegalQueryError: Raised when query is not valid.
        """
class IMessageCopier(Interface):
    """
    Optional interface for mailboxes which can copy message objects
    directly, without round-tripping through addMessage.
    """
    def copy(messageObject):
        """Copy the given message object into this mailbox.
        The message object will be one which was previously returned by
        L{IMailbox.fetch}.
        Implementations which wish to offer better performance than the
        default implementation should implement this interface.
        If this interface is not implemented by the mailbox, IMailbox.addMessage
        will be used instead.
        @rtype: C{Deferred} or C{int}
        @return: Either the UID of the message or a Deferred which fires
        with the UID when the copy finishes.
        """
class IMailboxInfo(Interface):
    """Interface specifying only the methods required for C{listMailboxes}.
    Implementations can return objects implementing only these methods for
    return to C{listMailboxes} if it can allow them to operate more
    efficiently.
    """
    def getFlags():
        """Return the flags defined in this mailbox
        Flags with the \\ prefix are reserved for use as system flags.
        @rtype: C{list} of C{str}
        @return: A list of the flags that can be set on messages in this mailbox.
        """
    def getHierarchicalDelimiter():
        """Get the character which delimits namespaces for this mailbox.
        @rtype: C{str}
        """
class IMailbox(IMailboxInfo):
    """
    A mailbox: exposes message counts and UID bookkeeping, supports change
    listeners, and allows messages to be added, fetched, flagged, and
    expunged.
    """
    def getUIDValidity():
        """Return the unique validity identifier for this mailbox.
        @rtype: C{int}
        """
    def getUIDNext():
        """Return the likely UID for the next message added to this mailbox.
        @rtype: C{int}
        """
    def getUID(message):
        """Return the UID of a message in the mailbox
        @type message: C{int}
        @param message: The message sequence number
        @rtype: C{int}
        @return: The UID of the message.
        """
    def getMessageCount():
        """Return the number of messages in this mailbox.
        @rtype: C{int}
        """
    def getRecentCount():
        """Return the number of messages with the 'Recent' flag.
        @rtype: C{int}
        """
    def getUnseenCount():
        """Return the number of messages with the 'Unseen' flag.
        @rtype: C{int}
        """
    def isWriteable():
        """Get the read/write status of the mailbox.
        @rtype: C{int}
        @return: A true value if write permission is allowed, a false value otherwise.
        """
    def destroy():
        """Called before this mailbox is deleted, permanently.
        If necessary, all resources held by this mailbox should be cleaned
        up here. This function _must_ set the \\Noselect flag on this
        mailbox.
        """
    def requestStatus(names):
        """Return status information about this mailbox.
        Mailboxes which do not intend to do any special processing to
        generate the return value, C{statusRequestHelper} can be used
        to build the dictionary by calling the other interface methods
        which return the data for each name.
        @type names: Any iterable
        @param names: The status names to return information regarding.
        The possible values for each name are: MESSAGES, RECENT, UIDNEXT,
        UIDVALIDITY, UNSEEN.
        @rtype: C{dict} or C{Deferred}
        @return: A dictionary containing status information about the
        requested names is returned.  If the process of looking this
        information up would be costly, a deferred whose callback will
        eventually be passed this dictionary is returned instead.
        """
    def addListener(listener):
        """Add a mailbox change listener
        @type listener: Any object which implements C{IMailboxListener}
        @param listener: An object to add to the set of those which will
        be notified when the contents of this mailbox change.
        """
    def removeListener(listener):
        """Remove a mailbox change listener
        @type listener: Any object previously added to and not removed from
        this mailbox as a listener.
        @param listener: The object to remove from the set of listeners.
        @raise ValueError: Raised when the given object is not a listener for
        this mailbox.
        """
    def addMessage(message, flags = (), date = None):
        """Add the given message to this mailbox.
        @type message: A file-like object
        @param message: The RFC822 formatted message
        @type flags: Any iterable of C{str}
        @param flags: The flags to associate with this message
        @type date: C{str}
        @param date: If specified, the date to associate with this
        message.
        @rtype: C{Deferred}
        @return: A deferred whose callback is invoked with the message
        id if the message is added successfully and whose errback is
        invoked otherwise.
        @raise ReadOnlyMailbox: Raised if this Mailbox is not open for
        read-write.
        """
    def expunge():
        """Remove all messages flagged \\Deleted.
        @rtype: C{list} or C{Deferred}
        @return: The list of message sequence numbers which were deleted,
        or a C{Deferred} whose callback will be invoked with such a list.
        @raise ReadOnlyMailbox: Raised if this Mailbox is not open for
        read-write.
        """
    def fetch(messages, uid):
        """Retrieve one or more messages.
        @type messages: C{MessageSet}
        @param messages: The identifiers of messages to retrieve information
        about
        @type uid: C{bool}
        @param uid: If true, the IDs specified in the query are UIDs;
        otherwise they are message sequence IDs.
        @rtype: Any iterable of two-tuples of message sequence numbers and
        implementors of C{IMessage}.
        """
    def store(messages, flags, mode, uid):
        """Set the flags of one or more messages.
        @type messages: A MessageSet object with the list of messages requested
        @param messages: The identifiers of the messages to set the flags of.
        @type flags: sequence of C{str}
        @param flags: The flags to set, unset, or add.
        @type mode: -1, 0, or 1
        @param mode: If mode is -1, these flags should be removed from the
        specified messages.  If mode is 1, these flags should be added to
        the specified messages.  If mode is 0, all existing flags should be
        cleared and these flags should be added.
        @type uid: C{bool}
        @param uid: If true, the IDs specified in the query are UIDs;
        otherwise they are message sequence IDs.
        @rtype: C{dict} or C{Deferred}
        @return: A C{dict} mapping message sequence numbers to sequences of C{str}
        representing the flags set on the message after this operation has
        been performed, or a C{Deferred} whose callback will be invoked with
        such a C{dict}.
        @raise ReadOnlyMailbox: Raised if this mailbox is not open for
        read-write.
        """
class ICloseableMailbox(Interface):
    """A supplementary interface for mailboxes which require cleanup on close.
    Implementing this interface is optional. If it is implemented, the protocol
    code will call the close method defined whenever a mailbox is closed.
    """
    def close():
        """Close this mailbox.
        @rtype: C{Deferred} or C{None}
        @return: A C{Deferred} which fires when this mailbox
        has been closed, or None if the mailbox can be closed
        immediately.
        """
def _formatHeaders(headers):
hdrs = [': '.join((k.title(), '\r\n'.join(v.splitlines()))) for (k, v)
in headers.iteritems()]
hdrs = '\r\n'.join(hdrs) + '\r\n'
return hdrs
def subparts(m):
    """
    Yield each sub-part of C{m} in order, stopping at the first index for
    which C{getSubPart} raises L{IndexError}.
    """
    index = 0
    while True:
        try:
            part = m.getSubPart(index)
        except IndexError:
            return
        yield part
        index += 1
def iterateInReactor(i):
    """Consume an iterator at most a single iteration per reactor iteration.
    If the iterator produces a Deferred, the next iteration will not occur
    until the Deferred fires, otherwise the next iteration will be taken
    in the next reactor iteration.
    @rtype: C{Deferred}
    @return: A deferred which fires (with None) when the iterator is
    exhausted or whose errback is called if there is an exception.
    """
    # Imported here rather than at module level, presumably so that merely
    # importing this module does not require a reactor -- TODO confirm.
    from twisted.internet import reactor
    d = defer.Deferred()
    def go(last):
        try:
            # Python 2 iterator protocol; next(i) on Python 3.
            r = i.next()
        except StopIteration:
            d.callback(last)
        except:
            # NOTE(review): errback() is called with no argument inside an
            # except block, relying on it capturing the exception currently
            # being handled -- confirm against Deferred.errback docs.
            d.errback()
        else:
            if isinstance(r, defer.Deferred):
                # Wait for this Deferred before taking the next iteration.
                r.addCallback(go)
            else:
                # Otherwise resume on the next reactor turn.
                reactor.callLater(0, go, r)
    go(None)
    return d
class MessageProducer:
    """
    Render an L{IMessage} -- headers, body, and (recursively) any
    sub-parts -- into a buffer, then stream that buffer to a consumer.
    """
    # 2 ** 2 ** 2 ** 2 == 2 ** 16 == 65536 bytes read per iteration.
    CHUNK_SIZE = 2 ** 2 ** 2 ** 2
    def __init__(self, msg, buffer = None, scheduler = None):
        """Produce this message.
        @param msg: The message I am to produce.
        @type msg: L{IMessage}
        @param buffer: A buffer to hold the message in.  If None, I will
            use a L{tempfile.TemporaryFile}.
        @type buffer: file-like
        """
        self.msg = msg
        if buffer is None:
            buffer = tempfile.TemporaryFile()
        self.buffer = buffer
        if scheduler is None:
            scheduler = iterateInReactor
        self.scheduler = scheduler
        self.write = self.buffer.write
    def beginProducing(self, consumer):
        # Drive _produce one step at a time via the scheduler; the returned
        # Deferred fires once the whole message has been handled.
        self.consumer = consumer
        return self.scheduler(self._produce())
    def _produce(self):
        # Generator: writes headers (and multipart boundaries) into the
        # shared buffer, yielding between steps so the scheduler can
        # interleave other work.
        headers = self.msg.getHeaders(True)
        boundary = None
        if self.msg.isMultipart():
            content = headers.get('content-type')
            parts = [x.split('=', 1) for x in content.split(';')[1:]]
            parts = dict([(k.lower().strip(), v) for (k, v) in parts])
            boundary = parts.get('boundary')
            if boundary is None:
                # Bastards
                # The message declared no boundary; synthesize one and patch
                # it into the Content-Type header which gets emitted.
                boundary = '----=_%f_boundary_%f' % (time.time(), random.random())
                headers['content-type'] += '; boundary="%s"' % (boundary,)
            else:
                if boundary.startswith('"') and boundary.endswith('"'):
                    boundary = boundary[1:-1]
        self.write(_formatHeaders(headers))
        self.write('\r\n')
        if self.msg.isMultipart():
            for p in subparts(self.msg):
                self.write('\r\n--%s\r\n' % (boundary,))
                # Sub-producers share this buffer and get no consumer, so
                # only the outermost producer pushes to the real consumer
                # below.
                yield MessageProducer(p, self.buffer, self.scheduler
                    ).beginProducing(None
                    )
            self.write('\r\n--%s--\r\n' % (boundary,))
        else:
            f = self.msg.getBodyFile()
            while True:
                b = f.read(self.CHUNK_SIZE)
                if b:
                    self.buffer.write(b)
                    # Yield to the scheduler between chunks.
                    yield None
                else:
                    break
        if self.consumer:
            self.buffer.seek(0, 0)
            yield FileProducer(self.buffer
                ).beginProducing(self.consumer
                ).addCallback(lambda _: self
                )
class _FetchParser:
    """
    Parser for the fetch-att arguments of an IMAP4 FETCH command.

    C{parseString} drives a stack-based state machine (C{self.state});
    each C{state_*} method consumes a prefix of the remaining input and
    returns the number of characters it used.  Parsed attribute objects
    accumulate in C{self.result}.
    """
    class Envelope:
        # Response should be a list of fields from the message:
        # date, subject, from, sender, reply-to, to, cc, bcc, in-reply-to,
        # and message-id.
        #
        # from, sender, reply-to, to, cc, and bcc are themselves lists of
        # address information:
        # personal name, source route, mailbox name, host name
        #
        # reply-to and sender must not be None. If not present in a message
        # they should be defaulted to the value of the from field.
        type = 'envelope'
        __str__ = lambda self: 'envelope'
    class Flags:
        type = 'flags'
        __str__ = lambda self: 'flags'
    class InternalDate:
        type = 'internaldate'
        __str__ = lambda self: 'internaldate'
    class RFC822Header:
        type = 'rfc822header'
        __str__ = lambda self: 'rfc822.header'
    class RFC822Text:
        type = 'rfc822text'
        __str__ = lambda self: 'rfc822.text'
    class RFC822Size:
        type = 'rfc822size'
        __str__ = lambda self: 'rfc822.size'
    class RFC822:
        type = 'rfc822'
        __str__ = lambda self: 'rfc822'
    class UID:
        type = 'uid'
        __str__ = lambda self: 'uid'
    class Body:
        # A BODY[...]<...> fetch attribute; the optional section is stored
        # in header/text/mime/empty, and a partial range in
        # partialBegin/partialLength.
        type = 'body'
        peek = False
        header = None
        mime = None
        text = None
        part = ()
        empty = False
        partialBegin = None
        partialLength = None
        def __str__(self):
            base = 'BODY'
            part = ''
            separator = ''
            if self.part:
                # Part numbers are stored 0-based; rendered 1-based.
                part = '.'.join([str(x + 1) for x in self.part])
                separator = '.'
#            if self.peek:
#                base += '.PEEK'
            if self.header:
                base += '[%s%s%s]' % (part, separator, self.header,)
            elif self.text:
                base += '[%s%sTEXT]' % (part, separator)
            elif self.mime:
                base += '[%s%sMIME]' % (part, separator)
            elif self.empty:
                base += '[%s]' % (part,)
            if self.partialBegin is not None:
                base += '<%d.%d>' % (self.partialBegin, self.partialLength)
            return base
    class BodyStructure:
        type = 'bodystructure'
        __str__ = lambda self: 'bodystructure'
    # These three aren't top-level, they don't need type indicators
    class Header:
        negate = False
        fields = None
        part = None
        def __str__(self):
            base = 'HEADER'
            if self.fields:
                base += '.FIELDS'
                if self.negate:
                    base += '.NOT'
                fields = []
                for f in self.fields:
                    f = f.title()
                    if _needsQuote(f):
                        f = _quote(f)
                    fields.append(f)
                base += ' (%s)' % ' '.join(fields)
            if self.part:
                base = '.'.join([str(x + 1) for x in self.part]) + '.' + base
            return base
    class Text:
        pass
    class MIME:
        pass
    # Most recently parsed part-number path (0-based indices), or None.
    parts = None
    # Order matters: longer dotted names must precede their prefixes
    # (e.g. "rfc822.header" before "rfc822") since matching is startswith.
    _simple_fetch_att = [
        ('envelope', Envelope),
        ('flags', Flags),
        ('internaldate', InternalDate),
        ('rfc822.header', RFC822Header),
        ('rfc822.text', RFC822Text),
        ('rfc822.size', RFC822Size),
        ('rfc822', RFC822),
        ('uid', UID),
        ('bodystructure', BodyStructure),
    ]
    def __init__(self):
        self.state = ['initial']
        self.result = []
        self.remaining = ''
    def parseString(self, s):
        # Resume with any input left over from a previous call, then run
        # the state machine until both the input and the state stack are
        # exhausted.
        s = self.remaining + s
        try:
            while s or self.state:
                if not self.state:
                    raise IllegalClientResponse("Invalid Argument")
                # print 'Entering state_' + self.state[-1] + ' with', repr(s)
                state = self.state.pop()
                try:
                    used = getattr(self, 'state_' + state)(s)
                except:
                    # Re-push the state so parsing can be retried/resumed.
                    self.state.append(state)
                    raise
                else:
                    # print state, 'consumed', repr(s[:used])
                    s = s[used:]
        finally:
            self.remaining = s
    def state_initial(self, s):
        # In the initial state, the literals "ALL", "FULL", and "FAST"
        # are accepted, as is a ( indicating the beginning of a fetch_att
        # token, as is the beginning of a fetch_att token.
        if s == '':
            return 0
        l = s.lower()
        if l.startswith('all'):
            self.result.extend((
                self.Flags(), self.InternalDate(),
                self.RFC822Size(), self.Envelope()
            ))
            return 3
        if l.startswith('full'):
            self.result.extend((
                self.Flags(), self.InternalDate(),
                self.RFC822Size(), self.Envelope(),
                self.Body()
            ))
            return 4
        if l.startswith('fast'):
            self.result.extend((
                self.Flags(), self.InternalDate(), self.RFC822Size(),
            ))
            return 4
        if l.startswith('('):
            self.state.extend(('close_paren', 'maybe_fetch_att', 'fetch_att'))
            return 1
        self.state.append('fetch_att')
        return 0
    def state_close_paren(self, s):
        if s.startswith(')'):
            return 1
        raise Exception("Missing )")
    def state_whitespace(self, s):
        # Eat up all the leading whitespace
        if not s or not s[0].isspace():
            raise Exception("Whitespace expected, none found")
        i = 0
        for i in range(len(s)):
            if not s[i].isspace():
                break
        return i
    def state_maybe_fetch_att(self, s):
        if not s.startswith(')'):
            self.state.extend(('maybe_fetch_att', 'fetch_att', 'whitespace'))
            return 0
        # NOTE(review): falls through returning None when the next token is
        # ")"; s[None:] leaves the input unchanged for state_close_paren.
    def state_fetch_att(self, s):
        # Allowed fetch_att tokens are "ENVELOPE", "FLAGS", "INTERNALDATE",
        # "RFC822", "RFC822.HEADER", "RFC822.SIZE", "RFC822.TEXT", "BODY",
        # "BODYSTRUCTURE", "UID",
        # "BODY [".PEEK"] [<section>] ["<" <number> "." <nz_number> ">"]
        l = s.lower()
        for (name, cls) in self._simple_fetch_att:
            if l.startswith(name):
                self.result.append(cls())
                return len(name)
        b = self.Body()
        if l.startswith('body.peek'):
            b.peek = True
            used = 9
        elif l.startswith('body'):
            used = 4
        else:
            raise Exception("Nothing recognized in fetch_att: %s" % (l,))
        self.pending_body = b
        self.state.extend(('got_body', 'maybe_partial', 'maybe_section'))
        return used
    def state_got_body(self, s):
        self.result.append(self.pending_body)
        del self.pending_body
        return 0
    def state_maybe_section(self, s):
        if not s.startswith("["):
            return 0
        self.state.extend(('section', 'part_number'))
        return 1
    # Dotted part path, e.g. "1.2.3." or "1.2.3"; group 1 is the digits.
    _partExpr = re.compile(r'(\d+(?:\.\d+)*)\.?')
    def state_part_number(self, s):
        m = self._partExpr.match(s)
        if m is not None:
            # Store 0-based part indices.
            self.parts = [int(p) - 1 for p in m.groups()[0].split('.')]
            return m.end()
        else:
            self.parts = []
            return 0
    def state_section(self, s):
        # Grab "HEADER]" or "HEADER.FIELDS (Header list)]" or
        # "HEADER.FIELDS.NOT (Header list)]" or "TEXT]" or "MIME]" or
        # just "]".
        l = s.lower()
        used = 0
        if l.startswith(']'):
            self.pending_body.empty = True
            used += 1
        elif l.startswith('header]'):
            h = self.pending_body.header = self.Header()
            h.negate = True
            h.fields = ()
            used += 7
        elif l.startswith('text]'):
            self.pending_body.text = self.Text()
            used += 5
        elif l.startswith('mime]'):
            self.pending_body.mime = self.MIME()
            used += 5
        else:
            h = self.Header()
            if l.startswith('header.fields.not'):
                h.negate = True
                used += 17
            elif l.startswith('header.fields'):
                used += 13
            else:
                raise Exception("Unhandled section contents: %r" % (l,))
            self.pending_body.header = h
            self.state.extend(('finish_section', 'header_list', 'whitespace'))
        self.pending_body.part = tuple(self.parts)
        self.parts = None
        return used
    def state_finish_section(self, s):
        if not s.startswith(']'):
            raise Exception("section must end with ]")
        return 1
    def state_header_list(self, s):
        if not s.startswith('('):
            raise Exception("Header list must begin with (")
        end = s.find(')')
        if end == -1:
            raise Exception("Header list must end with )")
        headers = s[1:end].split()
        self.pending_body.header.fields = map(str.upper, headers)
        return end + 1
    def state_maybe_partial(self, s):
        # Grab <number.number> or nothing at all
        if not s.startswith('<'):
            return 0
        end = s.find('>')
        if end == -1:
            raise Exception("Found < but not >")
        partial = s[1:end]
        parts = partial.split('.', 1)
        if len(parts) != 2:
            raise Exception("Partial specification did not include two .-delimited integers")
        begin, length = map(int, parts)
        self.pending_body.partialBegin = begin
        self.pending_body.partialLength = length
        return end + 1
class FileProducer:
    """
    Producer which writes a file's remaining contents to a consumer as an
    IMAP4 literal: a "{size}" CRLF prefix followed by the bytes.
    """
    # 2 ** 2 ** 2 ** 2 == 2 ** 16 == 65536 bytes read per resumeProducing.
    CHUNK_SIZE = 2 ** 2 ** 2 ** 2
    # True until the "{size}\r\n" literal prefix has been written.
    firstWrite = True
    def __init__(self, f):
        self.f = f
    def beginProducing(self, consumer):
        self.consumer = consumer
        self.produce = consumer.write
        d = self._onDone = defer.Deferred()
        # Registered in non-streaming (pull) mode: the consumer calls
        # resumeProducing once for each chunk it wants.
        self.consumer.registerProducer(self, False)
        return d
    def resumeProducing(self):
        b = ''
        if self.firstWrite:
            # Emit the IMAP literal size prefix before any file data.
            b = '{%d}\r\n' % self._size()
            self.firstWrite = False
        if not self.f:
            # Already finished; a late resumeProducing is a no-op.
            return
        b = b + self.f.read(self.CHUNK_SIZE)
        if not b:
            # EOF: detach from the consumer, fire the completion Deferred,
            # and drop references so this producer cannot be reused.
            self.consumer.unregisterProducer()
            self._onDone.callback(self)
            self._onDone = self.f = self.consumer = None
        else:
            self.produce(b)
    def pauseProducing(self):
        pass
    def stopProducing(self):
        pass
    def _size(self):
        # Number of bytes from the current file position to EOF; the file
        # position is restored before returning.
        b = self.f.tell()
        self.f.seek(0, 2)
        e = self.f.tell()
        self.f.seek(b, 0)
        return e - b
def parseTime(s):
    """
    Parse an IMAP4 date string (e.g. C{"17-Jul-1996"}) into a
    C{time.struct_time}.

    @type s: C{str}
    @param s: A date in C{day-month-year} form.  The month may be either
        an abbreviated or a full English month name, case-insensitively.

    @rtype: C{time.struct_time}
    @return: The parsed date, with the time-of-day fields zeroed.

    @raise ValueError: If the string cannot be parsed as a date.
    """
    # XXX - This may require localization :(
    months = [
        'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct',
        'nov', 'dec', 'january', 'february', 'march', 'april', 'may', 'june',
        'july', 'august', 'september', 'october', 'november', 'december'
    ]
    expr = {
        'day': r"(?P<day>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
        'mon': r"(?P<mon>\w+)",
        'year': r"(?P<year>\d\d\d\d)"
    }
    m = re.match('%(day)s-%(mon)s-%(year)s' % expr, s)
    if not m:
        # Call form instead of the Python 2-only "raise E, msg" syntax.
        raise ValueError("Cannot parse time string %r" % (s,))
    d = m.groupdict()
    try:
        # Full month names start at index 12; "% 12" folds them onto the
        # same 1-12 month numbers as the abbreviated names.
        d['mon'] = 1 + (months.index(d['mon'].lower()) % 12)
        d['year'] = int(d['year'])
        d['day'] = int(d['day'])
    except ValueError:
        raise ValueError("Cannot parse time string %r" % (s,))
    else:
        return time.struct_time(
            (d['year'], d['mon'], d['day'], 0, 0, 0, -1, -1, -1)
        )
import codecs
def modified_base64(s):
    """
    Encode C{s} with the IMAP4-specific "modified BASE64": standard UTF-7
    with the shift markers stripped and "/" replaced by ",".
    """
    s_utf7 = s.encode('utf-7')
    # Drop the leading "+" and trailing "-" added by the utf-7 codec and
    # substitute "," for "/".
    return s_utf7[1:-1].replace('/', ',')
def modified_unbase64(s):
    """
    Decode C{s} from the IMAP4-specific "modified BASE64": restore "/" for
    ",", re-add the UTF-7 shift markers, and decode as UTF-7.
    """
    s_utf7 = '+' + s.replace(',', '/') + '-'
    return s_utf7.decode('utf-7')
def encoder(s, errors=None):
    """
    Encode the given C{unicode} string using the IMAP4 specific variation of
    UTF-7.

    @type s: C{unicode}
    @param s: The text to encode.

    @param errors: Policy for handling encoding errors.  Currently ignored.

    @return: C{tuple} of a C{str} giving the encoded bytes and an C{int}
        giving the number of code units consumed from the input.
    """
    # Code points passed through literally: printable ASCII except "&"
    # (0x26), which introduces an encoded sequence.  Computed once as a set
    # instead of rebuilding a concatenated range list for every character.
    directChars = frozenset(range(0x20, 0x26)) | frozenset(range(0x27, 0x7f))
    r = []
    _in = []
    for c in s:
        if ord(c) in directChars:
            if _in:
                # Flush any pending non-ASCII run as "&<base64>-".
                r.extend(['&', modified_base64(''.join(_in)), '-'])
                del _in[:]
            r.append(str(c))
        elif c == '&':
            if _in:
                r.extend(['&', modified_base64(''.join(_in)), '-'])
                del _in[:]
            # A literal ampersand is escaped as "&-".
            r.append('&-')
        else:
            _in.append(c)
    if _in:
        r.extend(['&', modified_base64(''.join(_in)), '-'])
    return (''.join(r), len(s))
def decoder(s, errors=None):
    """
    Decode the given C{str} using the IMAP4 specific variation of UTF-7.

    @type s: C{str}
    @param s: The bytes to decode.

    @param errors: Policy for handling decoding errors.  Currently ignored.

    @return: a C{tuple} of a C{unicode} string giving the text which was
        decoded and an C{int} giving the number of bytes consumed from the
        input.
    """
    out = []
    pending = []
    for ch in s:
        if pending:
            if ch == '-':
                # "&-" is the escaped form of a literal ampersand;
                # anything longer is a modified-base64 run.
                if len(pending) == 1:
                    out.append('&')
                else:
                    out.append(modified_unbase64(''.join(pending[1:])))
                pending = []
            else:
                pending.append(ch)
        elif ch == '&':
            pending = ['&']
        else:
            out.append(ch)
    if pending:
        # Unterminated encoded run at end of input.
        out.append(modified_unbase64(''.join(pending[1:])))
    return (''.join(out), len(s))
class StreamReader(codecs.StreamReader):
    """
    C{codecs.StreamReader} for the IMAP4-UTF-7 codec.
    """
    def decode(self, s, errors='strict'):
        # The errors policy is accepted for interface compatibility but is
        # ignored by the underlying decoder.
        return decoder(s)
class StreamWriter(codecs.StreamWriter):
    """
    C{codecs.StreamWriter} for the IMAP4-UTF-7 codec.
    """
    def encode(self, s, errors='strict'):
        # The errors policy is accepted for interface compatibility but is
        # ignored by the underlying encoder.
        return encoder(s)
# Codec registration data for "imap4-utf-7".  codecs.CodecInfo is not
# available on very old Python versions; fall back to registering the
# plain 4-tuple accepted there.
_codecInfo = (encoder, decoder, StreamReader, StreamWriter)
try:
    _codecInfoClass = codecs.CodecInfo
except AttributeError:
    pass
else:
    _codecInfo = _codecInfoClass(*_codecInfo)
def imap4_utf_7(name):
    """
    Codec search function: return the IMAP4-UTF-7 codec info for the name
    C{'imap4-utf-7'} and C{None} for any other name.
    """
    if name != 'imap4-utf-7':
        return None
    return _codecInfo
codecs.register(imap4_utf_7)
# Public API of this module, grouped by purpose.
__all__ = [
    # Protocol classes
    'IMAP4Server', 'IMAP4Client',
    # Interfaces
    'IMailboxListener', 'IClientAuthentication', 'IAccount', 'IMailbox',
    'INamespacePresenter', 'ICloseableMailbox', 'IMailboxInfo',
    'IMessage', 'IMessageCopier', 'IMessageFile', 'ISearchableMailbox',
    # Exceptions
    'IMAP4Exception', 'IllegalClientResponse', 'IllegalOperation',
    'IllegalMailboxEncoding', 'UnhandledResponse', 'NegativeResponse',
    'NoSupportedAuthentication', 'IllegalServerResponse',
    'IllegalIdentifierError', 'IllegalQueryError', 'MismatchedNesting',
    'MismatchedQuoting', 'MailboxException', 'MailboxCollision',
    'NoSuchMailbox', 'ReadOnlyMailbox',
    # Auth objects
    'CramMD5ClientAuthenticator', 'PLAINAuthenticator', 'LOGINAuthenticator',
    'PLAINCredentials', 'LOGINCredentials',
    # Simple query interface
    'Query', 'Not', 'Or',
    # Miscellaneous
    'MemoryAccount',
    'statusRequestHelper',
]
| mit |
mpalmi/clip | packages/scap-security-guide/scap-security-guide-0.1.25/shared/modules/verify_cce_module.py | 4 | 1790 | #!/usr/bin/python
import sys
import platform
from lxml import etree
# This script checks the validity of assigned CCEs, lists granted and remaining
# available CCEs, and checks for duplicates.
release = '%.0f' % float(platform.linux_distribution()[1])
xccdf_ns = "http://checklists.nist.gov/xccdf/1.1"
tree = etree.parse('../output/unlinked-rhel' + str(release) + '-xccdf.xml')
cces_assigned = tree.findall("//{%s}ident[@system='http://cce.mitre.org']"
% xccdf_ns)
assigned_ids = []
granted_ids = []
# print the list of assigned CCEs
print "Assigned CCEs:"
for item in cces_assigned:
print item.text
assigned_ids.append(item.text)
print "-------------"
# check for duplicates in the assigned CCE list
dup_assigned_ids = [item for item in cces_assigned if cces_assigned.count(item) > 1]
for item in dup_assigned_ids:
print "Duplicate assignment of CCE: %s" % item
# open the available CCE file
with open('../references/cce-rhel' + int(release) + '-avail.txt', 'r') as txt_file:
for line in txt_file:
granted_ids = [line.rstrip('\n') for line in txt_file]
# print CCEs that are available (i.e. in granted but not assigned)
for item in granted_ids:
if item not in assigned_ids:
print "Available CCE: %s" % item
for rule in tree.findall("//{%s}Rule" % xccdf_ns):
# print "rule is " + rule.get("id")
items = rule.findall("{%s}ident[@system='http://cce.mitre.org']" % xccdf_ns)
if len(items) > 1:
print "Rule with multiple CCEs assigned: %s" % rule.get("id")
if len(items) == 0:
print "Rule without CCE: %s" % rule.get("id")
for item in items:
if item.text not in granted_ids:
print "Invalid CCE: %s in %s" % (item.text, rule.get("id"))
sys.exit()
| apache-2.0 |
yosshy/nova | nova/tests/unit/api/openstack/compute/test_extended_volumes.py | 21 | 6485 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_serialization import jsonutils
import webob
from nova.api.openstack.compute import (extended_volumes
as extended_volumes_v21)
from nova.api.openstack import wsgi as os_wsgi
from nova import compute
from nova import db
from nova import objects
from nova.objects import instance as instance_obj
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova import volume
# Well-known volume/instance UUIDs shared by the stubs and assertions below.
UUID1 = '00000000-0000-0000-0000-000000000001'
UUID2 = '00000000-0000-0000-0000-000000000002'
UUID3 = '00000000-0000-0000-0000-000000000003'
def fake_compute_get(*args, **kwargs):
    """Stub for compute API get() returning one fake instance (UUID1)."""
    stubbed = fakes.stub_instance(1, uuid=UUID1)
    return fake_instance.fake_instance_obj(args[1], **stubbed)
def fake_compute_get_all(*args, **kwargs):
    """Stub for compute API get_all() returning two fake instances."""
    stubbed = [fakes.stub_instance(1), fakes.stub_instance(2)]
    default_fields = instance_obj.INSTANCE_DEFAULT_FIELDS
    return instance_obj._make_instance_list(args[1],
                                            objects.InstanceList(),
                                            stubbed, default_fields)
def fake_bdms_get_all_by_instance(*args, **kwargs):
    """Stub for the BDM DB query: two volume-backed block device mappings."""
    bdms = []
    for bdm_id, volume_id, delete in ((1, UUID1, True), (2, UUID2, False)):
        bdms.append(fake_block_device.FakeDbBlockDeviceDict(
            {'volume_id': volume_id, 'source_type': 'volume',
             'destination_type': 'volume', 'id': bdm_id,
             'delete_on_termination': delete}))
    return bdms
def fake_volume_get(*args, **kwargs):
    """No-op stub for volume.cinder.API.get; accepts anything, returns None."""
    return None
class ExtendedVolumesTestV21(test.TestCase):
    """Tests for the os-extended-volumes extension on the v2.1 endpoint.

    Verifies that the 'os-extended-volumes:volumes_attached' attribute is
    injected into server 'show' and 'detail' responses, based on the
    stubbed block device mappings above.
    """
    content_type = 'application/json'
    # Namespace prefix of the attribute injected into server responses.
    prefix = 'os-extended-volumes:'
    # Volumes expected to be reported as attached (from the stubbed BDMs).
    exp_volumes = [{'id': UUID1}, {'id': UUID2}]
    wsgi_api_version = os_wsgi.DEFAULT_API_VERSION

    def setUp(self):
        super(ExtendedVolumesTestV21, self).setUp()
        fakes.stub_out_nw_api(self.stubs)
        self.stubs.Set(compute.api.API, 'get', fake_compute_get)
        self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
                       fake_bdms_get_all_by_instance)
        self._setUp()
        self.app = self._setup_app()
        return_server = fakes.fake_instance_get()
        self.stubs.Set(db, 'instance_get_by_uuid', return_server)

    def _setup_app(self):
        # Overridden by subclasses to select a different WSGI application.
        return fakes.wsgi_app_v21(init_only=('os-extended-volumes',
                                             'servers'))

    def _setUp(self):
        # Overridden by subclasses for version-specific extra setup.
        self.Controller = extended_volumes_v21.ExtendedVolumesController()
        self.stubs.Set(volume.cinder.API, 'get', fake_volume_get)
        self.action_url = "/%s/action" % UUID1

    def _make_request(self, url, body=None):
        """Issue a request against the servers API and return the response.

        If *body* is given it is POSTed as JSON, otherwise a plain GET is
        performed.
        """
        req = webob.Request.blank('/v2/fake/servers' + url)
        req.headers['Accept'] = self.content_type
        # Bug fix: the previous code replaced the whole headers dict here,
        # silently discarding the Accept header set just above. Set the
        # microversion header individually instead.
        req.headers[os_wsgi.API_VERSION_REQUEST_HEADER] = \
            self.wsgi_api_version
        if body:
            req.body = jsonutils.dumps(body)
            req.method = 'POST'
        req.content_type = self.content_type
        res = req.get_response(self.app)
        return res

    def _get_server(self, body):
        return jsonutils.loads(body).get('server')

    def _get_servers(self, body):
        return jsonutils.loads(body).get('servers')

    def test_show(self):
        res = self._make_request('/%s' % UUID1)
        self.assertEqual(200, res.status_int)
        server = self._get_server(res.body)
        actual = server.get('%svolumes_attached' % self.prefix)
        self.assertEqual(self.exp_volumes, actual)

    def test_detail(self):
        res = self._make_request('/detail')
        self.assertEqual(200, res.status_int)
        # Each listed server must carry the same attached-volume data.
        for server in self._get_servers(res.body):
            actual = server.get('%svolumes_attached' % self.prefix)
            self.assertEqual(self.exp_volumes, actual)
class ExtendedVolumesTestV2(ExtendedVolumesTestV21):
    """Runs the same checks against the legacy v2 WSGI application."""

    def _setup_app(self):
        return fakes.wsgi_app(init_only=('servers',))

    def _setUp(self):
        extension_path = ('nova.api.openstack.compute.'
                          'contrib.select_extensions')
        self.flags(osapi_compute_extension=[extension_path],
                   osapi_compute_ext_list=['Extended_volumes'])
class ExtendedVolumesTestV23(ExtendedVolumesTestV21):
    """Microversion 2.3 additionally exposes delete_on_termination."""

    wsgi_api_version = '2.3'
    exp_volumes = [
        {'id': UUID1, 'delete_on_termination': True},
        {'id': UUID2, 'delete_on_termination': False},
    ]
class ExtendedVolumesEnforcementV21(test.NoDBTestCase):
    """Policy-enforcement tests for the extension.

    When the extension's policy rule denies the request, the controller
    must not extend the server view at all.
    """

    def setUp(self):
        super(ExtendedVolumesEnforcementV21, self).setUp()
        self.controller = extended_volumes_v21.ExtendedVolumesController()
        self.req = fakes.HTTPRequest.blank('')

    def _assert_policy_blocks_extend(self, invoke, mock_extend):
        # Shared body of the two tests below: deny the extension's policy
        # rule for the current (fake) project, invoke the controller, and
        # verify _extend_server was never reached.
        rule_name = 'os_compute_api:os-extended-volumes'
        self.policy.set_rules({rule_name: "project:non_fake"})
        invoke()
        self.assertFalse(mock_extend.called)

    @mock.patch.object(extended_volumes_v21.ExtendedVolumesController,
                       '_extend_server')
    def test_extend_show_policy_failed(self, mock_extend):
        # Pass ResponseObj as None, the code shouldn't touch the None.
        self._assert_policy_blocks_extend(
            lambda: self.controller.show(self.req, None, fakes.FAKE_UUID),
            mock_extend)

    @mock.patch.object(extended_volumes_v21.ExtendedVolumesController,
                       '_extend_server')
    def test_extend_detail_policy_failed(self, mock_extend):
        # Pass ResponseObj as None, the code shouldn't touch the None.
        self._assert_policy_blocks_extend(
            lambda: self.controller.detail(self.req, None), mock_extend)
| apache-2.0 |
infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v2_6_0/device_filter_set_broker.py | 14 | 51137 | from ..broker import Broker
class DeviceFilterSetBroker(Broker):
    """Broker for the NetMRI 2.6 device_filter_sets API endpoint.

    Exposes query operations (show/index/search/find) over device filter
    sets, i.e. the per-device rule lists collected by NetMRI.
    """
    # Endpoint/controller name used by the Broker base to build the full
    # remote method names (e.g. "device_filter_sets.show").
    controller = "device_filter_sets"
def show(self, **kwargs):
"""Shows the details for the specified device filter set.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceFilterSetID: The internal NetMRI identifier for this rule list.
:type DeviceFilterSetID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device filter set methods. The listed methods will be called on each device filter set returned and included in the output. Available methods are: data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device.
:type include: Array of String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_filter_set: The device filter set identified by the specified DeviceFilterSetID.
:rtype device_filter_set: DeviceFilterSet
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def index(self, **kwargs):
"""Lists the available device filter sets. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceFilterSetID: The internal NetMRI identifier for this rule list.
:type DeviceFilterSetID: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device to which this rule list belongs.
:type DeviceID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the device filter sets as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device filter set methods. The listed methods will be called on each device filter set returned and included in the output. Available methods are: data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DeviceFilterSetID
:param sort: The data field(s) to use for sorting the output. Default is DeviceFilterSetID. Valid values are DeviceFilterSetID, DeviceID, DataSourceID, FltSetFirstSeenTime, FltSetStartTime, FltSetEndTime, FltSetTimestamp, FltSetChangedCols, FltSetName, FltSetIPVersion, FltSetUseCount, FltSetArtificialInd, FltSetConfigText, FltSetProvisionData.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceFilterSet. Valid values are DeviceFilterSetID, DeviceID, DataSourceID, FltSetFirstSeenTime, FltSetStartTime, FltSetEndTime, FltSetTimestamp, FltSetChangedCols, FltSetName, FltSetIPVersion, FltSetUseCount, FltSetArtificialInd, FltSetConfigText, FltSetProvisionData. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_filter_sets: An array of the DeviceFilterSet objects that match the specified input criteria.
:rtype device_filter_sets: Array of DeviceFilterSet
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def search(self, **kwargs):
"""Lists the available device filter sets matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceFilterSetID: The internal NetMRI identifier for this rule list.
:type DeviceFilterSetID: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device to which this rule list belongs.
:type DeviceID: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FltSetArtificialInd: A flag indicating that this rule list has no counterpart in the device configuration.
:type FltSetArtificialInd: Array of Boolean
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FltSetChangedCols: The fields that changed between this revision of the record and the previous revision.
:type FltSetChangedCols: Array of String
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FltSetConfigText: The original text of the definition of this rule list in the device configuration.
:type FltSetConfigText: Array of String
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FltSetEndTime: The ending effective time of this record, or empty if still in effect.
:type FltSetEndTime: Array of DateTime
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FltSetFirstSeenTime: The timestamp of when NetMRI first discovered this rule list.
:type FltSetFirstSeenTime: Array of DateTime
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FltSetIPVersion: the IP version of the packets filtered by this rule list - default is 4.
:type FltSetIPVersion: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FltSetName: The name of this rule-list.
:type FltSetName: Array of String
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FltSetProvisionData: Internal data - do not modify, may change without warning.
:type FltSetProvisionData: Array of String
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FltSetStartTime: The starting effective time of this record.
:type FltSetStartTime: Array of DateTime
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FltSetTimestamp: The date and time this record was collected or calculated.
:type FltSetTimestamp: Array of DateTime
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FltSetUseCount: The number of usage of this rule list inside the configuration (may be for filtering or for NAT, vpn etc).
:type FltSetUseCount: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the device filter sets as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device filter set methods. The listed methods will be called on each device filter set returned and included in the output. Available methods are: data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DeviceFilterSetID
:param sort: The data field(s) to use for sorting the output. Default is DeviceFilterSetID. Valid values are DeviceFilterSetID, DeviceID, DataSourceID, FltSetFirstSeenTime, FltSetStartTime, FltSetEndTime, FltSetTimestamp, FltSetChangedCols, FltSetName, FltSetIPVersion, FltSetUseCount, FltSetArtificialInd, FltSetConfigText, FltSetProvisionData.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceFilterSet. Valid values are DeviceFilterSetID, DeviceID, DataSourceID, FltSetFirstSeenTime, FltSetStartTime, FltSetEndTime, FltSetTimestamp, FltSetChangedCols, FltSetName, FltSetIPVersion, FltSetUseCount, FltSetArtificialInd, FltSetConfigText, FltSetProvisionData. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against device filter sets, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: DataSourceID, DeviceFilterSetID, DeviceID, FltSetArtificialInd, FltSetChangedCols, FltSetConfigText, FltSetEndTime, FltSetFirstSeenTime, FltSetIPVersion, FltSetName, FltSetProvisionData, FltSetStartTime, FltSetTimestamp, FltSetUseCount.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_filter_sets: An array of the DeviceFilterSet objects that match the specified input criteria.
:rtype device_filter_sets: Array of DeviceFilterSet
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
    """List the available device filter sets matching a flexible specification.

    This is the most flexible of the query mechanisms, allowing comparison
    operations other than equality, at the cost of some efficiency relative
    to the index or search methods.

    For each searchable field F — DataSourceID, DeviceFilterSetID, DeviceID,
    FltSetArtificialInd, FltSetChangedCols, FltSetConfigText, FltSetEndTime,
    FltSetFirstSeenTime, FltSetIPVersion, FltSetName, FltSetProvisionData,
    FltSetStartTime, FltSetTimestamp, FltSetUseCount — three inputs are
    recognized:

    :param op_F: the comparison operator to apply to F. Valid values are:
        =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null,
        is not null, between. For ``between``, a comma-delimited value is
        treated as an Array and must contain an even number of values.
    :type op_F: String
    :param val_f_F: compare F against the value of the *field* named here.
    :type val_f_F: String
    :param val_c_F: compare F against this explicit *constant* value.
        Exactly one of val_f_F / val_c_F must accompany op_F.
    :type val_c_F: String

    General options:

    :param DeviceGroupID: device group IDs to which to limit the results.
    :type DeviceGroupID: Array of Integer
    :param timestamp: return data as of this date and time; if omitted, the
        most recently collected data is used.
    :type timestamp: DateTime
    :param methods: per-record methods to call and include in the output.
        Available: data_source, device.
    :type methods: Array of String
    :param include: associated object types to embed in the output.
        Available: data_source, device.
    :type include: Array of String
    :param start: record number to return in the selected page (default 0).
    :type start: Integer
    :param limit: page size; maximum 10000 (default 1000).
    :type limit: Integer
    :param sort: field(s) to sort by (default DeviceFilterSetID).
    :type sort: Array of String
    :param dir: sort direction(s), 'asc' or 'desc' (default 'asc').
    :type dir: Array of String
    :param select: attributes to return for each DeviceFilterSet; all
        attributes are returned when empty or omitted.
    :type select: Array
    :param goto_field: field name for NIOS GOTO row positioning (api >= 2.8).
    :type goto_field: String
    :param goto_value: value of goto_field for NIOS GOTO row positioning.
    :type goto_value: String
    :param xml_filter: a SetFilter XML structure applied AFTER any search
        query or field values but before limit/pagination. May be costly if
        not associated with database filtering.
    :type xml_filter: String

    :return device_filter_sets: the DeviceFilterSet objects matching the
        specified input criteria.
    :rtype device_filter_sets: Array of DeviceFilterSet
    """
    method_name = self._get_method_fullname("find")
    return self.api_list_request(method_name, kwargs)
def data_source(self, **kwargs):
    """Return the collector NetMRI that collected this data record.

    :param DeviceFilterSetID: the internal NetMRI identifier for this rule
        list (Integer, required).
    :return: the DataSource (collector NetMRI) for this record.
    :rtype: DataSource
    """
    method_name = self._get_method_fullname("data_source")
    return self.api_request(method_name, kwargs)
def device(self, **kwargs):
    """Return the device from which this data was collected.

    :param DeviceFilterSetID: the internal NetMRI identifier for this rule
        list (Integer, required).
    :return: the Device this rule list was collected from.
    :rtype: Device
    """
    method_name = self._get_method_fullname("device")
    return self.api_request(method_name, kwargs)
# license: apache-2.0
# source: mbox/django -- tests/admin_changelist/models.py
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class Event(models.Model):
    """Model whose field maps to a renamed database column."""
    # Oracle can have problems with a column named "date"
    date = models.DateField(db_column="event_date")
class Parent(models.Model):
    """Simple named model referenced by Child via a ForeignKey."""
    name = models.CharField(max_length=128)
class Child(models.Model):
    """Child row with an optional, non-editable link to a Parent."""
    # editable=False keeps the FK out of model forms; null=True allows orphans.
    parent = models.ForeignKey(Parent, editable=False, null=True)
    name = models.CharField(max_length=30, blank=True)
    age = models.IntegerField(null=True, blank=True)
class Genre(models.Model):
    """Named genre, used as the target of Band's many-to-many relation."""
    name = models.CharField(max_length=20)
class Band(models.Model):
    """Band with a plain (no intermediary) many-to-many to Genre."""
    name = models.CharField(max_length=20)
    nr_of_members = models.PositiveIntegerField()
    genres = models.ManyToManyField(Genre)
@python_2_unicode_compatible
class Musician(models.Model):
    """Musician identified by name; str() returns the name."""
    name = models.CharField(max_length=30)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Group(models.Model):
    """Group of Musicians linked through the Membership intermediary model."""
    name = models.CharField(max_length=30)
    # through='Membership' makes membership rows explicit (adds a role field).
    members = models.ManyToManyField(Musician, through='Membership')

    def __str__(self):
        return self.name
class Membership(models.Model):
    """Intermediary ('through') table joining Musician and Group."""
    music = models.ForeignKey(Musician)
    group = models.ForeignKey(Group)
    role = models.CharField(max_length=15)
class Quartet(Group):
    """Multi-table-inheritance child of Group; adds no fields."""
    pass
class ChordsMusician(Musician):
    """Multi-table-inheritance child of Musician; adds no fields."""
    pass
class ChordsBand(models.Model):
    """Band whose members relation goes through the Invitation model."""
    name = models.CharField(max_length=30)
    members = models.ManyToManyField(ChordsMusician, through='Invitation')
class Invitation(models.Model):
    """Intermediary ('through') table joining ChordsMusician and ChordsBand."""
    player = models.ForeignKey(ChordsMusician)
    band = models.ForeignKey(ChordsBand)
    instrument = models.CharField(max_length=15)
class Swallow(models.Model):
    """Model with a multi-field default ordering declared in Meta."""
    origin = models.CharField(max_length=255)
    load = models.FloatField()
    speed = models.FloatField()

    class Meta:
        # Default queryset ordering: by speed, then load.
        ordering = ('speed', 'load')
class UnorderedObject(models.Model):
    """
    Model without any defined `Meta.ordering`.

    Refs #17198.
    """
    # Deliberately shadows the builtin name `bool`; harmless as a class attribute.
    bool = models.BooleanField(default=True)
class OrderedObjectManager(models.Manager):
    """Manager whose default queryset is ordered by the ``number`` column."""

    def get_queryset(self):
        # Explicit two-argument super() keeps Python 2 compatibility,
        # matching the rest of this module.
        base = super(OrderedObjectManager, self).get_queryset()
        return base.order_by('number')
class OrderedObject(models.Model):
    """
    Model with Manager that defines a default order.

    Refs #17198.
    """
    name = models.CharField(max_length=255)
    # Deliberately shadows the builtin name `bool`; harmless as a class attribute.
    bool = models.BooleanField(default=True)
    # Attribute name differs from the underlying column name.
    number = models.IntegerField(default=0, db_column='number_val')

    objects = OrderedObjectManager()
class CustomIdUser(models.Model):
    """Model whose primary key is a custom-named AutoField."""
    # Despite the name, this is an integer AutoField, not a UUIDField.
    uuid = models.AutoField(primary_key=True)
# license: bsd-3-clause
# source: lukebarnard1/bokeh -- bokeh/models/map_plots.py
""" Models for displaying maps in Bokeh plots.
"""
from __future__ import absolute_import
from ..properties import HasProps
from ..properties import Int, Float, Enum, Instance
from ..enums import MapType
from .plots import Plot
class GMapOptions(HasProps):
    """ Options for GMapPlot objects: map center, zoom level, and map type. """

    lat = Float(help="""
    The latitude where the map should be centered.
    """)

    lng = Float(help="""
    The longitude where the map should be centered.
    """)

    # Defaults to zoom level 12.
    zoom = Int(12, help="""
    The initial `zoom level`_ to use when displaying the GMapPlot.

    .. _zoom level: https://developers.google.com/maps/documentation/staticmaps/#Zoomlevels
    """)

    map_type = Enum(MapType, help="""
    The `map type`_ to use for the GMapPlot.

    .. _map type: https://developers.google.com/maps/documentation/staticmaps/#MapTypes
    """)
class GMapPlot(Plot):
    """ A Bokeh Plot with a `Google Map`_ displayed underneath.

    .. _Google Map: https://www.google.com/maps/
    """

    # Center/zoom/type configuration; see GMapOptions.
    map_options = Instance(GMapOptions, help="""
    Options for displaying the plot.
    """)
class GeoJSOptions(HasProps):
    """ Options for GeoJSPlot objects: map center and zoom level. """

    lat = Float(help="""
    The latitude where the map should be centered.
    """)

    lng = Float(help="""
    The longitude where the map should be centered.
    """)

    # Defaults to zoom level 12.
    zoom = Int(12, help="""
    The initial zoom level to use when displaying the GeoJSPlot.
    """)
class GeoJSPlot(Plot):
    """ A Bokeh Plot with a `GeoJS Map`_ displayed underneath.

    .. warning::
        GeoJSPlot support should be considered experimental, a subject
        to revision or removal.

    .. _GeoJS Map: https://github.com/OpenGeoscience/geojs
    """

    # Center/zoom configuration; see GeoJSOptions.
    map_options = Instance(GeoJSOptions, help="""
    Options for displaying the plot.
    """)
# license: bsd-3-clause
# source: Flexget/Flexget -- flexget/tests/test_qualities.py
import pytest
from jinja2 import Template
from flexget.components.parsing.parsers.parser_guessit import ParserGuessit
from flexget.components.parsing.parsers.parser_internal import ParserInternal
from flexget.utils.qualities import Quality
class TestQualityModule:
    """Basic sanity checks for Quality construction and name resolution."""

    def test_get(self):
        # An unrecognized name normalizes to the falsy "unknown" quality.
        assert not Quality(), 'unknown quality is not false'
        assert Quality('foobar') == Quality(), 'unknown not returned'

    def test_common_name(self):
        # Both spellings of 720p must resolve to the same canonical name.
        for candidate in ('720p', '1280x720'):
            resolved = Quality(candidate).name
            assert resolved == '720p', resolved
class TestQualityParser:
    """Table-driven quality parsing checks, run against both parser backends.

    The class-scoped, autouse ``parser`` fixture parametrizes every test in
    this class over the internal parser and the guessit-based parser.
    """

    @pytest.fixture(
        scope='class', params=['internal', 'guessit'], ids=['internal', 'guessit'], autouse=True
    )
    def parser(self, request):
        # Map the fixture parameter onto the parser implementation class.
        if request.param == 'internal':
            return ParserInternal
        if request.param == 'guessit':
            return ParserGuessit

    # Each entry is (title, expected canonical quality string) with an
    # optional third element: False means "skip under the guessit parser".
    @pytest.mark.parametrize(
        "test_quality",
        [
            ('Test.File.dvdscr', 'dvdscr'),
            ('Test.File 1080p.web.vp9', '1080p webdl vp9'),
            ('Test.File 1080p.web', '1080p webdl'),
            ('Test.File.2160p.web', '2160p webdl'),
            ('Test.File.1080.web-random', '1080p webdl'),
            ('Test.File.1080.webrandom', '1080p'),
            ('Test.File 1080p.web-dl', '1080p webdl'),
            ('Test.File.web-dl.1080p', '1080p webdl'),
            ('Test.File.WebHD.720p', '720p webdl'),
            ('Test.File.720p.bluray', '720p bluray'),
            ('Test.File.720hd.bluray', '720p bluray'),
            ('Test.File.1080p.bluray', '1080p bluray'),
            ('Test.File.2160p.bluray', '2160p bluray'),
            ('Test.File.1080p.cam', '1080p cam'),
            ('A Movie 2011 TS 576P XviD-DTRG', '576p ts xvid'),
            ('Test.File.720p.bluray.r5', '720p r5'),
            ('Test.File.1080p.bluray.rc', '1080p r5'),
            # 10bit
            ('Test.File.480p.10bit', '480p 10bit'),
            ('Test.File.720p.10bit', '720p 10bit'),
            ('Test.File.720p.bluray.10bit', '720p bluray 10bit'),
            ('Test.File.1080p.10bit', '1080p 10bit'),
            ('Test.File.1080p.bluray.10bit', '1080p bluray 10bit'),
            ('Test.File.720p.web', '720p webdl'),
            ('Test.File.720p.webdl', '720p webdl'),
            ('Test.File.1280x720_web dl', '720p webdl'),
            ('Test.File.720p.h264.web.dl', '720p webdl h264'),
            ('Test.File.1080p.webhd.x264', '1080p webdl h264'),
            ('Test.File.REPACK.1080p.WEBRip.DDP5.1.x264', '1080p webrip h264 dd+5.1'),
            ('Test.File.480.hdtv.x265', '480p hdtv h265'),
            ('Test.File.web', 'webdl'),
            ('Test.File.web-dl', 'webdl'),
            ('Test.File.720P', '720p'),
            ('Test.File.1920x1080', '1080p'),
            ('Test.File.3840x2160', '2160p'),
            ('Test.File.1080i', '1080i'),
            ('Test File blurayrip', 'bluray'),
            ('Test.File.br-rip', 'bluray'),
            ('Test.File.720px', '720p'),
            ('Test.File.720p50', '720p'),
            ('Test.File.720p60', '720p'),
            ('Test.File.dvd.rip', 'dvdrip'),
            ('Test.File.dvd.rip.r5', 'r5'),
            ('Test.File.[576p][00112233].mkv', '576p'),
            ('Test.TS.FooBar', 'ts'),
            ('Test.File.360p.avi', '360p'),
            ('Test.File.[360p].mkv', '360p'),
            ('Test.File.368.avi', '368p'),
            ('Test.File.720p.hdtv.avi', '720p hdtv'),
            ('Test.File.1080p.hdtv.avi', '1080p hdtv'),
            ('Test.File.720p.preair.avi', '720p preair'),
            # ('Test.File.ts.dvdrip.avi', 'ts'), This should no exists. Having Telesync and DVDRip is a non-sense.
            ('Test.File.HDTS.blah', 'ts'),
            # ('Test.File.HDCAM.bluray.lie', 'cam'), This should no exists. Having Cam and Bluray is a non-sense.
            # Test qualities as part of words. #1593
            ('Tsar.File.720p', '720p'),
            ('Camera.1080p', '1080p'),
            # Some audio formats
            ('Test.File.DTSHDMA', 'dtshd'),
            ('Test.File.DTSHD.MA', 'dtshd'),
            ('Test.File.DTS.HDMA', 'dtshd'),
            ('Test.File.dts.hd.ma', 'dtshd'),
            ('Test.File.DTS.HD', 'dtshd'),
            ('Test.File.DTSHD', 'dtshd'),
            ('Test.File.DTS', 'dts'),
            ('Test.File.truehd', 'truehd'),
            ('Test.File.truehd7.1', 'truehd'),
            ('Test.File.truehd.7.1', 'truehd'),
            ('Test.File.DTSHDMA', 'dtshd'),
            ('Test.File.DTSHDMA5.1', 'dtshd'),
            ('Test.File.DD2.0', 'dd5.1', False),
            ('Test.File.AC35.1', 'ac3', False),
        ],
    )
    def test_quality_failures(self, parser, test_quality):
        # Kind of a hack to get around the awful limitations of Guessit without creating extra tests
        guessit = test_quality[2] if len(test_quality) > 2 else True
        if not guessit and parser.__name__ == 'ParserGuessit':
            return
        quality = parser().parse_movie(test_quality[0]).quality
        assert str(quality) == test_quality[1], '`%s` quality should be `%s` not `%s`' % (
            test_quality[0],
            test_quality[1],
            quality,
        )
class TestQualityInternalParser:
    """Audio-codec parsing cases exercised only against the internal parser."""

    # Each entry is (title, expected canonical quality string).
    @pytest.mark.parametrize(
        "test_quality",
        [
            ('Test.File.DD+5.1', 'dd+5.1'),
            ('Test.File.DDP5.1', 'dd+5.1'),
            ('Test.File.DDP7.1', 'dd+5.1'),
            ('Test.File.DD5.1', 'dd5.1'),
            ('Test.File.DD4.0', 'dd5.1'),
            ('Test.File.DD2.1', 'dd5.1'),
            ('Test.File.FLAC1.0', 'flac'),
        ],
    )
    def test_quality_failures(self, test_quality):
        # Parse the title and compare the canonical quality string.
        quality = ParserInternal().parse_movie(test_quality[0]).quality
        assert str(quality) == test_quality[1], '`%s` quality should be `%s` not `%s`' % (
            test_quality[0],
            test_quality[1],
            quality,
        )
class TestFilterQuality:
    """Functional tests for the ``quality`` plugin's filter modes.

    The mocked entries cover one quality per tier (cam < HR < hdtv/720p) so
    each task's accept/reject split is unambiguous.
    """

    # Jinja template: ``{{parser}}`` selects the parser backend per fixture
    # parameter below.
    _config = """
        templates:
          global:
            parsing:
              series: {{parser}}
              movie: {{parser}}
            mock:
              - {title: 'Smoke.1280x720'}
              - {title: 'Smoke.HDTV'}
              - {title: 'Smoke.cam'}
              - {title: 'Smoke.HR'}
            accept_all: yes
        tasks:
          qual:
            quality:
              - hdtv
              - 720p
          min:
            quality: HR+
          max:
            quality: "<=cam <HR"
          min_max:
            quality: HR-720i
          quality_str:
            template: no_global
            mock:
              - {title: 'Test S01E01 HDTV 1080p', quality: 'hdtv 1080p dd+5.1'}
            accept_all: yes
    """

    @pytest.fixture(scope='class', params=['internal', 'guessit'], ids=['internal', 'guessit'])
    def config(self, request):
        """Override and parametrize default config fixture."""
        return Template(self._config).render({'parser': request.param})

    def test_quality(self, execute_task):
        # Explicit whitelist of qualities: hdtv and 720p pass, others rejected.
        task = execute_task('qual')
        entry = task.find_entry('rejected', title='Smoke.cam')
        assert entry, 'Smoke.cam should have been rejected'
        entry = task.find_entry(title='Smoke.1280x720')
        assert entry, 'entry not found?'
        assert entry in task.accepted, '720p should be accepted'
        assert len(task.rejected) == 2, 'wrong number of entries rejected'
        assert len(task.accepted) == 2, 'wrong number of entries accepted'

    def test_min(self, execute_task):
        # Lower bound: only HR and better pass.
        task = execute_task('min')
        entry = task.find_entry('rejected', title='Smoke.HDTV')
        assert entry, 'Smoke.HDTV should have been rejected'
        entry = task.find_entry(title='Smoke.1280x720')
        assert entry, 'entry not found?'
        assert entry in task.accepted, '720p should be accepted'
        assert len(task.rejected) == 2, 'wrong number of entries rejected'
        assert len(task.accepted) == 2, 'wrong number of entries accepted'

    def test_max(self, execute_task):
        # Upper bound: only cam (and below) passes.
        task = execute_task('max')
        entry = task.find_entry('rejected', title='Smoke.1280x720')
        assert entry, 'Smoke.1280x720 should have been rejected'
        entry = task.find_entry(title='Smoke.cam')
        assert entry, 'entry not found?'
        assert entry in task.accepted, 'cam should be accepted'
        assert len(task.rejected) == 3, 'wrong number of entries rejected'
        assert len(task.accepted) == 1, 'wrong number of entries accepted'

    def test_min_max(self, execute_task):
        # Range bound: only HR falls inside HR-720i.
        task = execute_task('min_max')
        entry = task.find_entry('rejected', title='Smoke.1280x720')
        assert entry, 'Smoke.1280x720 should have been rejected'
        entry = task.find_entry(title='Smoke.HR')
        assert entry, 'entry not found?'
        assert entry in task.accepted, 'HR should be accepted'
        assert len(task.rejected) == 3, 'wrong number of entries rejected'
        assert len(task.accepted) == 1, 'wrong number of entries accepted'

    def test_quality_string(self, execute_task):
        # A quality supplied as a string on the entry must be coerced to a
        # Quality instance by the framework.
        task = execute_task('quality_str')
        entry = task.find_entry('accepted', title='Test S01E01 HDTV 1080p')
        assert isinstance(
            entry['quality'], Quality
        ), 'Wrong quality type, should be Quality not str'
        assert str(entry['quality']) == '1080p hdtv dd+5.1'
class TestQualityAudio:
    """Tests for Dolby Digital audio-channel normalization and comparisons.

    DD/DD+ variants with unusual channel counts (7.1, 5.0, 4.0, 2.1) are
    normalized to the canonical 5.1 form; only explicit 2.0 stays distinct.
    """

    config = """
        tasks:
          test_dd_audio_channels:
            quality: "dd+5.1"
            mock:
              - {title: 'My Show S01E05 720p HDTV DD+7.1'}
              - {title: 'My Show S01E05 720p HDTV DD+5.0'}
          test_dd_audio_min:
            quality: ">dd5.1"
            mock:
              - {title: 'My Show S01E05 720p HDTV DD5.1'}
              - {title: 'My Show S01E05 720p HDTV DD+2.0'}
          test_dd_audio_max:
            quality: "<=dd5.1"
            mock:
              - {title: 'My Show S01E05 720p HDTV DD5.1'}
              - {title: 'My Show S01E05 720p HDTV DD+5.1'}
              - {title: 'My Show S01E05 720p HDTV DD+7.1'}
    """

    def test_dd_audio_channels(self, execute_task):
        # Non-5.1 DD+ channel layouts normalize to dd+5.1 and match the filter.
        task = execute_task('test_dd_audio_channels')
        entry = task.find_entry('undecided', title='My Show S01E05 720p HDTV DD+7.1')
        assert entry, 'Entry "My Show S01E05 720p HDTV DD+7.1" should not have been rejected'
        assert (
            entry['quality'].audio == 'dd+5.1'
        ), 'audio "dd+7.1" should have been parsed as dd+5.1'
        entry = task.find_entry('undecided', title='My Show S01E05 720p HDTV DD+5.0')
        assert (
            entry['quality'].audio == 'dd+5.1'
        ), 'audio "dd+5.0" should have been parsed as dd+5.1'

    def test_dd_audio_min(self, execute_task):
        # Strictly-greater-than dd5.1: plain DD5.1 is rejected, DD+ passes.
        task = execute_task('test_dd_audio_min')
        assert len(task.rejected) == 1, 'should have rejected one'
        entry = task.find_entry('undecided', title='My Show S01E05 720p HDTV DD+2.0')
        assert entry, 'Entry "My Show S01E05 720p HDTV DD+2.0" should not have been rejected'
        assert entry['quality'].audio == 'dd+5.1', 'audio should have been parsed as dd+5.1'

    def test_dd_audio_max(self, execute_task):
        # At-most dd5.1: both DD+ variants are rejected, DD5.1 passes.
        task = execute_task('test_dd_audio_max')
        assert len(task.rejected) == 2, 'should have rejected two'
        entry = task.find_entry('undecided', title='My Show S01E05 720p HDTV DD5.1')
        assert entry, 'Entry "My Show S01E05 720p HDTV DD5.1" should not have been rejected'
        assert entry['quality'].audio == 'dd5.1', 'audio should have been parsed as dd5.1'
| mit |
ibyer/xhtml2pdf | test/witherror.py | 154 | 1128 | # -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Subversion keyword-expansion metadata kept from the original project history.
__version__ = "$Revision: 194 $"
__author__ = "$Author: holtwick $"
__date__ = "$Date: 2008-04-18 18:59:53 +0200 (Fr, 18 Apr 2008) $"
import ho.pisa as pisa
def helloWorld():
    """Render a deliberately broken HTML snippet to PDF.

    The embedded ``data:`` image URI is intentionally invalid so pisa's
    ``show_error_as_pdf`` reporting path is exercised. If no fatal error
    occurred, the generated PDF is opened in the system viewer.
    """
    filename = __file__ + ".pdf"
    # Use open() instead of the legacy file() builtin (removed in Python 3)
    # and a context manager so the output handle is closed before the
    # external viewer reads the file.
    with open(filename, "wb") as output:
        pdf = pisa.CreatePDF(
            u"Hello <strong>World</strong> <img src='data:image/jpg;base64,?´*'>",
            output,
            show_error_as_pdf=True,
        )
    if not pdf.err:
        pisa.startViewer(filename)
if __name__=="__main__":
    # Enable pisa's console logging before running the demo.
    pisa.showLogging()
    helloWorld()
| apache-2.0 |
tealover/nova | nova/safe_utils.py | 23 | 2343 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities and helper functions that won't produce circular imports."""
import inspect
def getcallargs(function, *args, **kwargs):
    """This is a simplified inspect.getcallargs (2.7+).

    This method can only properly grab arguments which are passed in as
    keyword arguments, or given names by the method being called. This means
    that an *arg in a method signature and any arguments captured by it will
    be left out of the results.

    :param function: the callable to inspect
    :param args: positional arguments the caller would pass
    :param kwargs: keyword arguments the caller would pass
    :returns: dict mapping argument names to the values they would be bound to
    """
    keyed_args = {}

    # inspect.getargspec() was removed in Python 3.11. getfullargspec()
    # shares the same leading fields (args, varargs, varkw, defaults), so
    # prefer it when available and fall back for old interpreters.
    getspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    spec = getspec(function)
    argnames, defaults = spec[0], spec[3]

    keyed_args.update(kwargs)

    # NOTE(alaski) the implicit 'self' or 'cls' argument shows up in
    # argnames but not in args or kwargs. Uses 'in' rather than '==' because
    # some tests use 'self2'.
    if len(argnames) > 0 and ('self' in argnames[0] or 'cls' == argnames[0]):
        # The function may not actually be a method or have im_self.
        # Typically seen when it's stubbed with mox.
        if inspect.ismethod(function) and hasattr(function, 'im_self'):
            keyed_args[argnames[0]] = function.im_self
        else:
            keyed_args[argnames[0]] = None

    # Positional arguments fill the argument names not already bound above.
    remaining_argnames = [name for name in argnames if name not in keyed_args]
    keyed_args.update(dict(zip(remaining_argnames, args)))

    # Defaults apply to the trailing argument names that remain unbound.
    if defaults:
        num_defaults = len(defaults)
        for argname, value in zip(argnames[-num_defaults:], defaults):
            if argname not in keyed_args:
                keyed_args[argname] = value

    return keyed_args
| apache-2.0 |
rayNymous/nupic | src/nupic/regions/ImageSensorFilters/Tracking.py | 17 | 11228 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import random
import numpy
from PIL import Image
from nupic.regions.ImageSensorFilters.BaseFilter import BaseFilter
class Tracking(BaseFilter):

  """
  Create resized versions of the original image, using various methods of
  padding and stretching.
  """

  def __init__(self,
               targetDims,
               padding=0,
               scales=None,
               fillValue=0,
               fillFromImageWherePossible=True,
               preservationMode=None,
               qualityLevel='antialias',
               dumpDebugImages=False,
               applyAlpha=True):
    """
    @param targetDims -- (width, height) of the output patch, before padding
        and per-scale multiplication.
    @param padding -- number of extra pixels added on each side of the patch.
    @param scales -- list/tuple of scale multipliers; one output image is
        produced per scale. Defaults to [1.0].
    @param fillValue -- gray value used for padding. None means "choose black
        or white automatically from the image histogram"; a sequence means
        "pick one of these values at random per call".
    @param fillFromImageWherePossible -- if True, padding pixels are taken
        from the original image around the bounding box when available.
    @param preservationMode -- None, 'aspect' (preserve aspect ratio) or
        'size' (preserve absolute scale).
    @param qualityLevel -- specifies the quality of the filter to be used
    for resizing image patches; must be one of:
    'nearest', 'bilinear', 'bicubic', 'antialias'
    (in increasing order of quality)
    @param applyAlpha -- if True, we'll "apply" the alpha channel to the image
    before flattening it (from 'LA' to 'L'). This allows you to pass in
    images that are non-rectangular. If False, the alpha channel will
    just be used to figure out the bounding box of the object (unless
    a tracking box is passed in, in which case alpha is ignored).
    Note: this is only useful if fillFromImageWherePossible=False
    """
    BaseFilter.__init__(self)

    # Apply default for scales
    if scales is None:
      scales = [1.0]
    if type(scales) not in (list, tuple):
      raise ValueError("'scales' must be a list or tuple")
    if type(scales) is tuple:
      scales = list(scales)

    self._applyAlpha = applyAlpha
    self._targetDims = targetDims
    self._padding = padding
    self._scales = scales
    self._fillValue = fillValue
    self._fillFromImageWherePossible = fillFromImageWherePossible
    self._preservationMode = preservationMode
    # Maps the quality name onto the PIL resampling constant, e.g.
    # 'antialias' -> Image.ANTIALIAS.
    # NOTE(review): process() below chooses BICUBIC/ANTIALIAS per scale and
    # does not appear to read self._resizingFilter — confirm before removal.
    self._resizingFilter = eval("Image.%s" % qualityLevel.upper())
    self._dumpDebugImages = dumpDebugImages
    # Weights 0..255 used to compute the mean gray level when automatically
    # choosing a fill value (fillValue is None).
    if fillValue is None:
      self._histWeights = numpy.array(range(256), dtype='float')

  def process(self, image):
    """
    Performs the following operations:
    1. Locates the original bounding box of the image as defined by the
       image's alpha mask. It is assumed that the alpha mask will consist
       of a single rectangular, in which case the resulting bbox will
       be exactly equivalent to the mask representation. However, if for
       some reason the positive regions of the alpha mask is not a single
       rectangle, things will still work.
    2. Fit the bounding box to the target dimensions, scaling as needed,
       and filling in padding regions if needed (if the aspect ratio of
       the bounding box does not match that of the target dimensions
       which, in general, will be True.) If padding is needed, we fill
       from the original image pixels around the bounding box if
       fillFromImageWherePossible is True and we're not outside the original
       image bounds, otherwise, we use 'fillValue'.
    3. Apply each scale in 'scales' to the resulting cropped image, and
       pad each side by 'padding' (pulling from the real image pixels
       when possible, and filling with 'fillValue' where not.)
    4. Return the list of cropped images.
    """
    BaseFilter.process(self, image)

    # Input must be grayscale-with-alpha.
    assert image.mode == "LA"

    # Pull bbox of the alpha mask
    # A caller-supplied tracking box (possibly serialized as a string in
    # image.info) takes precedence over the alpha channel's bbox.
    if 'tracking' in image.info:
      bbox = image.info['tracking']
      if type(bbox) == type(""):
        bbox = eval(bbox)
    else:
      bbox = image.split()[1].getbbox()

    # If alpha channel is completely empty, we will end up
    # with a bbox of 'None'. Nothing much we can do
    if bbox is None:
      bbox = (0, 0, image.size[0], image.size[1])
      print 'WARNING: empty alpha channel'
    # Check for malformed box
    elif bbox[0] >= bbox[2] or bbox[1] >= bbox[3]:
      bbox = (0, 0, image.size[0], image.size[1])
      print 'WARNING: malformed box'

    # Ascertain the original raw size of the tracking box
    width = bbox[2] - bbox[0]
    height = bbox[3] - bbox[1]

    # Resolve the padding fill value: automatic (contrast with the image's
    # mean gray level), fixed int, or randomly chosen from a sequence.
    if self._fillValue is None:
      [gray, alpha] = image.split()
      hist = numpy.array(gray.histogram(alpha), dtype='float')
      mean = (hist * self._histWeights).sum() / hist.sum()
      if mean < 127.5:
        fillValue = 255
      else:
        fillValue = 0
    elif isinstance(self._fillValue, int):
      fillValue = self._fillValue
    else:
      fillValue = self._fillValue[random.randint(0, len(self._fillValue)-1)]

    # If we're not going to fill from the rest of the image, we should
    # apply the alpha from the original image directly. If the original alpha
    # was a square bounding box, this won't hurt (since we're not filling from
    # the image). If it was a tight mask, this will prevent it from reverting
    # back to a square mask.
    if (self._applyAlpha) and (not self._fillFromImageWherePossible):
      grayImage, alphaImage = image.split()
      image = Image.new('L', size=image.size, color=fillValue)
      image.paste(grayImage, alphaImage)

    newImages = []
    for scaleIdx, scale in enumerate(self._scales):

      # Target dimensions depend on the scale at which we're operating
      targetDims = (self._targetDims[0] * scale,
                    self._targetDims[1] * scale)

      scaleFactorX = float(targetDims[0]) / float(width)
      scaleFactorY = float(targetDims[1]) / float(height)

      # Determine the scaling factors needed to map the
      # bounding box to the target dimensions (prior to
      # padding be accounted for)
      if self._preservationMode is None:
        pass
      elif self._preservationMode == "aspect":
        scaleFactor = min(scaleFactorX, scaleFactorY)
        scaleFactorX = scaleFactor
        scaleFactorY = scaleFactor
      else:
        assert self._preservationMode == "size"
        scaleFactorX = scale
        scaleFactorY = scale

      # Now, holding the scaling factor constant, compute the
      # size of the src box in the original image that will
      # produce the correctly padded target size
      targetWidth = int(round(targetDims[0])) + 2*self._padding
      targetHeight = int(round(targetDims[1])) + 2*self._padding
      srcWidth = float(targetWidth) / scaleFactorX
      srcHeight = float(targetHeight) / scaleFactorY

      # Compute the exact coordinates of the source box
      if self._fillFromImageWherePossible:
        # Center the source box on the tracking box and clip it against the
        # image bounds; clipped-away regions become fill pixels later.
        origCenterX = float(bbox[0] + bbox[2]) * 0.5
        origCenterY = float(bbox[1] + bbox[3]) * 0.5
        halfWidth = srcWidth * 0.5
        halfHeight = srcHeight * 0.5
        srcBox = (int(round(origCenterX - halfWidth)),
                  int(round(origCenterY - halfHeight)),
                  int(round(origCenterX + halfWidth)),
                  int(round(origCenterY + halfHeight)))
        # take into account clipping off the image boundary
        clipBox = (max(srcBox[0], 0),
                   max(srcBox[1], 0),
                   min(srcBox[2], image.size[0]),
                   min(srcBox[3], image.size[1]))
        #clipOffset = (clipBox[0] - srcBox[0],
        #              clipBox[1] - srcBox[1])
      else:
        # extend the bbox to include padding pixels on all sides
        paddedBBox = (int(bbox[0] - self._padding/scaleFactorX),
                      int(bbox[1] - self._padding/scaleFactorY),
                      int(bbox[2] + self._padding/scaleFactorX),
                      int(bbox[3] + self._padding/scaleFactorY))
        # take into account clipping off the image boundary
        clipBox = (max(paddedBBox[0], 0),
                   max(paddedBBox[1], 0),
                   min(paddedBBox[2], image.size[0]),
                   min(paddedBBox[3], image.size[1]))
        # The srcBox is the correct aspect ratio, and either taller or wider than the
        # bbox, but not both.
        srcBox = (0, 0, srcWidth, srcHeight)
        clipBoxWidth = clipBox[2] - clipBox[0]
        clipBoxHeight = clipBox[3] - clipBox[1]
        #clipOffset = (int((srcWidth - clipBoxWidth)/2),
        #              int((srcHeight - clipBoxHeight)/2))

      # Copy the source rect
      croppedImage = image.crop(clipBox)
      croppedImage.load()

      # New development
      # Give the crop a fully opaque alpha channel before pasting below.
      croppedImage.putalpha(Image.new(mode='L', size=croppedImage.size, color=255))

      # Scale the cropped image. At last one dimension of this cropped image
      # should be the target size.
      xFactor = float(targetWidth) / croppedImage.size[0]
      yFactor = float(targetHeight) / croppedImage.size[1]
      scaleFactor = min(xFactor, yFactor)
      # BICUBIC when enlarging, ANTIALIAS when shrinking.
      if scaleFactor >= 1:
        resizingFilter = Image.BICUBIC
      else:
        resizingFilter = Image.ANTIALIAS
      scaledImage = croppedImage.resize((int(round(scaleFactor * croppedImage.size[0])),
                                         int(round(scaleFactor * croppedImage.size[1]))),
                                        resizingFilter)
      # Center the scaled crop inside the padded target canvas.
      clipOffset = (int((targetWidth - scaledImage.size[0]) / 2),
                    int((targetHeight - scaledImage.size[1]) / 2))

      # Paste into a new image
      newImage = Image.new(mode='LA', size=(targetWidth, targetHeight), color=fillValue)
      newImage.paste(scaledImage, clipOffset)

      # Resize the cropped image to the (padded) target size
      # Convert and save the scaled image as the output
      assert newImage.mode == 'LA'
      newImages += [newImage]

      # Dump debugging images to disk
      if self._dumpDebugImages:
        self._handleDebug(newImage, scaleIdx)

    return [newImages]

  def _handleDebug(self, image, scaleIdx, debugDir="tracking.d"):
    """
    Dump tracking boxes to disk for offline analysis
    """
    # Lazily create a monotonically increasing index for filenames.
    if not hasattr(self, "_debugIndex"):
      self._debugIndex = 0
    if not os.path.isdir(debugDir):
      os.mkdir(debugDir)
    debugPath = os.path.join(debugDir, "tracking.%06d.%02d.png" % \
                            (self._debugIndex, scaleIdx))
    image.save(debugPath)
    self._debugIndex += 1

  def getOutputCount(self):
    """
    Return the number of images returned by each call to process().

    If the filter creates multiple simultaneous outputs, return a tuple:
    (outputCount, simultaneousOutputCount).
    """
    # One batch per call, containing one image per configured scale.
    return 1, len(self._scales)
| agpl-3.0 |
gustavovaliati/ci724-finalwork-ppginfufpr-2016 | util/google/retrain.py | 19 | 43141 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple transfer learning with an Inception v3 architecture model which
displays summaries in TensorBoard.
This example shows how to take a Inception v3 architecture model trained on
ImageNet images, and train a new top layer that can recognize other classes of
images.
The top layer receives as input a 2048-dimensional vector for each image. We
train a softmax layer on top of this representation. Assuming the softmax layer
contains N labels, this corresponds to learning N + 2048*N model parameters
corresponding to the learned biases and weights.
Here's an example, which assumes you have a folder containing class-named
subfolders, each full of images for each label. The example folder flower_photos
should have a structure like this:
~/flower_photos/daisy/photo1.jpg
~/flower_photos/daisy/photo2.jpg
...
~/flower_photos/rose/anotherphoto77.jpg
...
~/flower_photos/sunflower/somepicture.jpg
The subfolder names are important, since they define what label is applied to
each image, but the filenames themselves don't matter. Once your images are
prepared, you can run the training with a command like this:
bazel build tensorflow/examples/image_retraining:retrain && \
bazel-bin/tensorflow/examples/image_retraining/retrain \
--image_dir ~/flower_photos
You can replace the image_dir argument with any folder containing subfolders of
images. The label for each image is taken from the name of the subfolder it's
in.
This produces a new model file that can be loaded and run by any TensorFlow
program, for example the label_image sample code.
To use with TensorBoard:
By default, this script will log summaries to /tmp/retrain_logs directory
Visualize the summaries with this command:
tensorboard --logdir /tmp/retrain_logs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from datetime import datetime
import hashlib
import os.path
import random
import re
import struct
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
# Parsed command-line arguments; assigned elsewhere in the script before the
# functions below read attributes such as FLAGS.model_dir.
FLAGS = None

# These are all parameters that are tied to the particular model architecture
# we're using for Inception v3. These include things like tensor names and their
# sizes. If you want to adapt this script to work with another model, you will
# need to update these to reflect the values in the network you're using.
# pylint: disable=line-too-long
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# pylint: enable=line-too-long
BOTTLENECK_TENSOR_NAME = 'pool_3/_reshape:0'
# Length of the bottleneck feature vector produced by the Inception v3 graph.
BOTTLENECK_TENSOR_SIZE = 2048
MODEL_INPUT_WIDTH = 299
MODEL_INPUT_HEIGHT = 299
MODEL_INPUT_DEPTH = 3
JPEG_DATA_TENSOR_NAME = 'DecodeJpeg/contents:0'
RESIZED_INPUT_TENSOR_NAME = 'ResizeBilinear:0'
# Upper bound on images per class; also the modulus of the hash-based split.
MAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1  # ~134M
def create_image_lists(image_dir, testing_percentage, validation_percentage):
  """Builds a list of training images from the file system.

  Analyzes the sub folders in the image directory, splits them into stable
  training, testing, and validation sets, and returns a data structure
  describing the lists of images for each label and their paths.

  Args:
    image_dir: String path to a folder containing subfolders of images.
    testing_percentage: Integer percentage of the images to reserve for tests.
    validation_percentage: Integer percentage of images reserved for validation.

  Returns:
    A dictionary containing an entry for each label subfolder, with images split
    into training, testing, and validation sets within each label.
  """
  if not gfile.Exists(image_dir):
    print("Image directory '" + image_dir + "' not found.")
    return None
  result = {}
  sub_dirs = [x[0] for x in gfile.Walk(image_dir)]
  # The root directory comes first, so skip it.
  is_root_dir = True
  for sub_dir in sub_dirs:
    if is_root_dir:
      is_root_dir = False
      continue
    # NOTE(review): on case-insensitive filesystems 'jpg' and 'JPG' globs can
    # match the same files twice — confirm before relying on exact counts.
    extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']
    file_list = []
    dir_name = os.path.basename(sub_dir)
    if dir_name == image_dir:
      continue
    print("Looking for images in '" + dir_name + "'")
    for extension in extensions:
      file_glob = os.path.join(image_dir, dir_name, '*.' + extension)
      file_list.extend(gfile.Glob(file_glob))
    if not file_list:
      print('No files found')
      continue
    if len(file_list) < 20:
      print('WARNING: Folder has less than 20 images, which may cause issues.')
    elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:
      print('WARNING: Folder {} has more than {} images. Some images will '
            'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS))
    # Normalize the folder name into a lowercase, alphanumeric label.
    label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())
    training_images = []
    testing_images = []
    validation_images = []
    for file_name in file_list:
      base_name = os.path.basename(file_name)
      # We want to ignore anything after '_nohash_' in the file name when
      # deciding which set to put an image in, the data set creator has a way of
      # grouping photos that are close variations of each other. For example
      # this is used in the plant disease data set to group multiple pictures of
      # the same leaf.
      hash_name = re.sub(r'_nohash_.*$', '', file_name)
      # This looks a bit magical, but we need to decide whether this file should
      # go into the training, testing, or validation sets, and we want to keep
      # existing files in the same set even if more files are subsequently
      # added.
      # To do that, we need a stable way of deciding based on just the file name
      # itself, so we do a hash of that and then use that to generate a
      # probability value that we use to assign it.
      hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()
      percentage_hash = ((int(hash_name_hashed, 16) %
                          (MAX_NUM_IMAGES_PER_CLASS + 1)) *
                         (100.0 / MAX_NUM_IMAGES_PER_CLASS))
      if percentage_hash < validation_percentage:
        validation_images.append(base_name)
      elif percentage_hash < (testing_percentage + validation_percentage):
        testing_images.append(base_name)
      else:
        training_images.append(base_name)
    result[label_name] = {
        'dir': dir_name,
        'training': training_images,
        'testing': testing_images,
        'validation': validation_images,
    }
  return result
def get_image_path(image_lists, label_name, index, image_dir, category):
  """Builds the file system path of one image for a label.

  The index is wrapped modulo the number of images available for the
  requested label/category, so arbitrarily large indices are valid.

  Args:
    image_lists: Dictionary of training images for each label.
    label_name: Label string we want to get an image for.
    index: Int offset of the image we want. This will be moduloed by the
    available number of images for the label, so it can be arbitrarily large.
    image_dir: Root folder string of the subfolders containing the training
    images.
    category: Name string of set to pull images from - training, testing, or
    validation.

  Returns:
    File system path string to an image that meets the requested parameters.
  """
  if label_name not in image_lists:
    tf.logging.fatal('Label does not exist %s.', label_name)
  label_entry = image_lists[label_name]
  if category not in label_entry:
    tf.logging.fatal('Category does not exist %s.', category)
  file_names = label_entry[category]
  if not file_names:
    tf.logging.fatal('Label %s has no images in the category %s.',
                     label_name, category)
  chosen_name = file_names[index % len(file_names)]
  return os.path.join(image_dir, label_entry['dir'], chosen_name)
def get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,
                        category):
  """Builds the path of the cached bottleneck file for one image.

  Args:
    image_lists: Dictionary of training images for each label.
    label_name: Label string we want to get an image for.
    index: Integer offset of the image we want. This will be moduloed by the
    available number of images for the label, so it can be arbitrarily large.
    bottleneck_dir: Folder string holding cached files of bottleneck values.
    category: Name string of set to pull images from - training, testing, or
    validation.

  Returns:
    File system path string to an image that meets the requested parameters.
  """
  # The bottleneck cache mirrors the image tree, with a '.txt' suffix.
  image_path = get_image_path(image_lists, label_name, index, bottleneck_dir,
                              category)
  return image_path + '.txt'
def create_inception_graph():
  """Creates a graph from saved GraphDef file and returns a Graph object.

  Loads the frozen Inception v3 GraphDef from FLAGS.model_dir and imports it
  into the default graph of a temporary session.

  Returns:
    Graph holding the trained Inception network, and various tensors we'll be
    manipulating.
  """
  with tf.Session() as sess:
    model_filename = os.path.join(
        FLAGS.model_dir, 'classify_image_graph_def.pb')
    with gfile.FastGFile(model_filename, 'rb') as f:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(f.read())
      # Import with an empty name scope and grab handles to the three
      # tensors the rest of the script feeds and reads.
      bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
          tf.import_graph_def(graph_def, name='', return_elements=[
              BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
              RESIZED_INPUT_TENSOR_NAME]))
  return sess.graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
def run_bottleneck_on_image(sess, image_data, image_data_tensor,
                            bottleneck_tensor):
  """Runs inference on an image to extract the 'bottleneck' summary layer.

  Args:
    sess: Current active TensorFlow Session.
    image_data: String of raw JPEG data.
    image_data_tensor: Input data layer in the graph.
    bottleneck_tensor: Layer before the final softmax.

  Returns:
    Numpy array of bottleneck values.
  """
  feed = {image_data_tensor: image_data}
  raw_values = sess.run(bottleneck_tensor, feed)
  # Drop the singleton batch dimension so callers get a flat vector.
  return np.squeeze(raw_values)
def maybe_download_and_extract():
  """Download and extract model tar file.

  If the pretrained model we're using doesn't already exist, this function
  downloads it from the TensorFlow.org website and unpacks it into the
  FLAGS.model_dir directory. The archive is re-extracted on every call even
  when the tarball was already present.
  """
  dest_directory = FLAGS.model_dir
  if not os.path.exists(dest_directory):
    os.makedirs(dest_directory)
  filename = DATA_URL.split('/')[-1]
  filepath = os.path.join(dest_directory, filename)
  if not os.path.exists(filepath):

    def _progress(count, block_size, total_size):
      # Reporthook for urlretrieve: redraws an in-place percentage line.
      sys.stdout.write('\r>> Downloading %s %.1f%%' %
                       (filename,
                        float(count * block_size) / float(total_size) * 100.0))
      sys.stdout.flush()

    filepath, _ = urllib.request.urlretrieve(DATA_URL,
                                             filepath,
                                             _progress)
    print()
    statinfo = os.stat(filepath)
    print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
  tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def ensure_dir_exists(dir_name):
  """Makes sure the folder exists on disk.

  Args:
    dir_name: Path string to the folder we want to create.
  """
  # os.makedirs(..., exist_ok=True) avoids the check-then-create race the
  # previous `if not os.path.exists(...)` guard had when several workers
  # start at the same time, and creates intermediate directories as needed.
  os.makedirs(dir_name, exist_ok=True)
def write_list_of_floats_to_file(list_of_floats, file_path):
  """Writes a given list of floats to a binary file.

  Args:
    list_of_floats: List of floats we want to write to a file.
    file_path: Path to a file where list of floats will be stored.
  """
  # Pack as native-endian doubles. Using len() instead of the hard-coded
  # BOTTLENECK_TENSOR_SIZE generalizes this helper to any list length; the
  # original raised struct.error for lists of any other size.
  s = struct.pack('d' * len(list_of_floats), *list_of_floats)
  with open(file_path, 'wb') as f:
    f.write(s)
def read_list_of_floats_from_file(file_path):
  """Reads list of floats from a given file.

  Args:
    file_path: Path to a file where list of floats was stored.

  Returns:
    Array of bottleneck values (list of floats).
  """
  with open(file_path, 'rb') as f:
    data = f.read()
  # Derive the element count from the file size rather than assuming exactly
  # BOTTLENECK_TENSOR_SIZE doubles, so any file produced by
  # write_list_of_floats_to_file round-trips regardless of length.
  count = len(data) // struct.calcsize('d')
  return list(struct.unpack('d' * count, data))
# Module-level map from bottleneck file path to its parsed float values.
# NOTE(review): appears unused by the functions visible in this file —
# confirm before removing.
bottleneck_path_2_bottleneck_values = {}
def create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
                           image_dir, category, sess, jpeg_data_tensor,
                           bottleneck_tensor):
  """Computes the bottleneck vector for one image and caches it to disk.

  The values are written to bottleneck_path as a single comma-separated line
  of floats, which get_or_create_bottleneck re-reads later.
  """
  print('Creating bottleneck at ' + bottleneck_path)
  image_path = get_image_path(image_lists, label_name, index, image_dir,
                              category)
  if not gfile.Exists(image_path):
    # NOTE(review): tf.logging.fatal only logs; execution continues and the
    # FastGFile read below raises on the missing file — confirm intended.
    tf.logging.fatal('File does not exist %s', image_path)
  image_data = gfile.FastGFile(image_path, 'rb').read()
  bottleneck_values = run_bottleneck_on_image(sess, image_data,
                                              jpeg_data_tensor,
                                              bottleneck_tensor)
  # Cache as comma-separated text so it can be parsed back with float().
  bottleneck_string = ','.join(str(x) for x in bottleneck_values)
  with open(bottleneck_path, 'w') as bottleneck_file:
    bottleneck_file.write(bottleneck_string)
def get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir,
                             category, bottleneck_dir, jpeg_data_tensor,
                             bottleneck_tensor):
  """Retrieves or calculates bottleneck values for an image.

  If a cached version of the bottleneck data exists on-disk, return that,
  otherwise calculate the data and save it to disk for future use.

  Args:
    sess: The current active TensorFlow Session.
    image_lists: Dictionary of training images for each label.
    label_name: Label string we want to get an image for.
    index: Integer offset of the image we want. This will be modulo-ed by the
    available number of images for the label, so it can be arbitrarily large.
    image_dir: Root folder string of the subfolders containing the training
    images.
    category: Name string of which set to pull images from - training, testing,
    or validation.
    bottleneck_dir: Folder string holding cached files of bottleneck values.
    jpeg_data_tensor: The tensor to feed loaded jpeg data into.
    bottleneck_tensor: The output tensor for the bottleneck values.

  Returns:
    Numpy array of values produced by the bottleneck layer for the image.
  """
  label_lists = image_lists[label_name]
  sub_dir = label_lists['dir']
  sub_dir_path = os.path.join(bottleneck_dir, sub_dir)
  ensure_dir_exists(sub_dir_path)
  bottleneck_path = get_bottleneck_path(image_lists, label_name, index,
                                        bottleneck_dir, category)
  if not os.path.exists(bottleneck_path):
    create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
                           image_dir, category, sess, jpeg_data_tensor,
                           bottleneck_tensor)
  with open(bottleneck_path, 'r') as bottleneck_file:
    bottleneck_string = bottleneck_file.read()
  did_hit_error = False
  try:
    bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
  except ValueError:
    # A truncated/corrupt cache file (e.g. from an interrupted run) is the
    # only expected failure here. The previous bare `except:` also swallowed
    # KeyboardInterrupt and SystemExit, masking unrelated bugs.
    print("Invalid float found, recreating bottleneck")
    did_hit_error = True
  if did_hit_error:
    create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
                           image_dir, category, sess, jpeg_data_tensor,
                           bottleneck_tensor)
    with open(bottleneck_path, 'r') as bottleneck_file:
      bottleneck_string = bottleneck_file.read()
    # Allow exceptions to propagate here, since they shouldn't happen after a
    # fresh creation.
    bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
  return bottleneck_values
def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,
                      jpeg_data_tensor, bottleneck_tensor):
  """Ensures all the training, testing, and validation bottlenecks are cached.

  Because we're likely to read the same image multiple times (if there are no
  distortions applied during training) it can speed things up a lot if we
  calculate the bottleneck layer values once for each image during
  preprocessing, and then just read those cached values repeatedly during
  training. Here we go through all the images we've found, calculate those
  values, and save them off.

  Args:
    sess: The current active TensorFlow Session.
    image_lists: Dictionary of training images for each label.
    image_dir: Root folder string of the subfolders containing the training
    images.
    bottleneck_dir: Folder string holding cached files of bottleneck values.
    jpeg_data_tensor: Input tensor for jpeg data from file.
    bottleneck_tensor: The penultimate output layer of the graph.

  Returns:
    Nothing.
  """
  how_many_bottlenecks = 0
  ensure_dir_exists(bottleneck_dir)
  for label_name, label_lists in image_lists.items():
    for category in ['training', 'testing', 'validation']:
      category_list = label_lists[category]
      for index, unused_base_name in enumerate(category_list):
        get_or_create_bottleneck(sess, image_lists, label_name, index,
                                 image_dir, category, bottleneck_dir,
                                 jpeg_data_tensor, bottleneck_tensor)
        how_many_bottlenecks += 1
        if how_many_bottlenecks % 100 == 0:
          # Progress heartbeat; first-time caching can take a long while.
          print(str(how_many_bottlenecks) + ' bottleneck files created.')
def get_random_cached_bottlenecks(sess, image_lists, how_many, category,
                                  bottleneck_dir, image_dir, jpeg_data_tensor,
                                  bottleneck_tensor):
  """Retrieves bottleneck values for cached images.

  If no distortions are being applied, this function can retrieve the cached
  bottleneck values directly from disk for images. It picks a random set of
  images from the specified category.

  Args:
    sess: Current TensorFlow Session.
    image_lists: Dictionary of training images for each label.
    how_many: If positive, a random sample of this size will be chosen.
    If negative, all bottlenecks will be retrieved.
    category: Name string of which set to pull from - training, testing, or
    validation.
    bottleneck_dir: Folder string holding cached files of bottleneck values.
    image_dir: Root folder string of the subfolders containing the training
    images.
    jpeg_data_tensor: The layer to feed jpeg image data into.
    bottleneck_tensor: The bottleneck output layer of the CNN graph.

  Returns:
    List of bottleneck arrays, their corresponding ground truths, and the
    relevant filenames.
  """
  class_count = len(image_lists.keys())
  bottlenecks = []
  ground_truths = []
  filenames = []
  if how_many >= 0:
    # Retrieve a random sample of bottlenecks.
    for unused_i in range(how_many):
      label_index = random.randrange(class_count)
      label_name = list(image_lists.keys())[label_index]
      # A large random index is fine: get_image_path takes it modulo the
      # number of images available for this label.
      image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
      image_name = get_image_path(image_lists, label_name, image_index,
                                  image_dir, category)
      bottleneck = get_or_create_bottleneck(sess, image_lists, label_name,
                                            image_index, image_dir, category,
                                            bottleneck_dir, jpeg_data_tensor,
                                            bottleneck_tensor)
      # One-hot ground truth vector for the sampled label.
      ground_truth = np.zeros(class_count, dtype=np.float32)
      ground_truth[label_index] = 1.0
      bottlenecks.append(bottleneck)
      ground_truths.append(ground_truth)
      filenames.append(image_name)
  else:
    # Retrieve all bottlenecks.
    for label_index, label_name in enumerate(image_lists.keys()):
      for image_index, image_name in enumerate(
          image_lists[label_name][category]):
        image_name = get_image_path(image_lists, label_name, image_index,
                                    image_dir, category)
        bottleneck = get_or_create_bottleneck(sess, image_lists, label_name,
                                              image_index, image_dir, category,
                                              bottleneck_dir, jpeg_data_tensor,
                                              bottleneck_tensor)
        ground_truth = np.zeros(class_count, dtype=np.float32)
        ground_truth[label_index] = 1.0
        bottlenecks.append(bottleneck)
        ground_truths.append(ground_truth)
        filenames.append(image_name)
  return bottlenecks, ground_truths, filenames
def get_random_distorted_bottlenecks(
    sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,
    distorted_image, resized_input_tensor, bottleneck_tensor):
  """Retrieves bottleneck values for training images, after distortions.

  If we're training with distortions like crops, scales, or flips, we have to
  recalculate the full model for every image, and so we can't use cached
  bottleneck values. Instead we find random images for the requested category,
  run them through the distortion graph, and then the full graph to get the
  bottleneck results for each.

  Args:
    sess: Current TensorFlow Session.
    image_lists: Dictionary of training images for each label.
    how_many: The integer number of bottleneck values to return.
    category: Name string of which set of images to fetch - training, testing,
    or validation.
    image_dir: Root folder string of the subfolders containing the training
    images.
    input_jpeg_tensor: The input layer we feed the image data to.
    distorted_image: The output node of the distortion graph.
    resized_input_tensor: The input node of the recognition graph.
    bottleneck_tensor: The bottleneck output layer of the CNN graph.

  Returns:
    List of bottleneck arrays and their corresponding ground truths.
  """
  class_count = len(image_lists.keys())
  bottlenecks = []
  ground_truths = []
  for unused_i in range(how_many):
    label_index = random.randrange(class_count)
    label_name = list(image_lists.keys())[label_index]
    # Arbitrarily large index; get_image_path applies the modulo.
    image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
    image_path = get_image_path(image_lists, label_name, image_index, image_dir,
                                category)
    if not gfile.Exists(image_path):
      tf.logging.fatal('File does not exist %s', image_path)
    jpeg_data = gfile.FastGFile(image_path, 'rb').read()
    # Note that we materialize the distorted_image_data as a numpy array before
    # sending running inference on the image. This involves 2 memory copies and
    # might be optimized in other implementations.
    distorted_image_data = sess.run(distorted_image,
                                    {input_jpeg_tensor: jpeg_data})
    bottleneck = run_bottleneck_on_image(sess, distorted_image_data,
                                         resized_input_tensor,
                                         bottleneck_tensor)
    # One-hot ground truth vector for the sampled label.
    ground_truth = np.zeros(class_count, dtype=np.float32)
    ground_truth[label_index] = 1.0
    bottlenecks.append(bottleneck)
    ground_truths.append(ground_truth)
  return bottlenecks, ground_truths
def should_distort_images(flip_left_right, random_crop, random_scale,
                          random_brightness):
  """Whether any distortions are enabled, from the input flags.

  Args:
    flip_left_right: Boolean whether to randomly mirror images horizontally.
    random_crop: Integer percentage setting the total margin used around the
    crop box.
    random_scale: Integer percentage of how much to vary the scale by.
    random_brightness: Integer range to randomly multiply the pixel values by.

  Returns:
    Boolean value indicating whether any distortions should be applied.
  """
  if flip_left_right:
    return True
  # Any non-zero percentage flag turns the distortion pipeline on.
  return any(percentage != 0
             for percentage in (random_crop, random_scale, random_brightness))
def add_input_distortions(flip_left_right, random_crop, random_scale,
                          random_brightness):
  """Creates the operations to apply the specified distortions.

  During training it can help to improve the results if we run the images
  through simple distortions like crops, scales, and flips. These reflect the
  kind of variations we expect in the real world, and so can help train the
  model to cope with natural data more effectively. Here we take the supplied
  parameters and construct a network of operations to apply them to an image.

  Cropping
  ~~~~~~~~

  Cropping is done by placing a bounding box at a random position in the full
  image. The cropping parameter controls the size of that box relative to the
  input image. If it's zero, then the box is the same size as the input and no
  cropping is performed. If the value is 50%, then the crop box will be half the
  width and height of the input. In a diagram it looks like this:

  <       width         >
  +---------------------+
  |                     |
  |   width - crop%     |
  |    <      >         |
  |    +------+         |
  |    |      |         |
  |    |      |         |
  |    |      |         |
  |    +------+         |
  |                     |
  |                     |
  +---------------------+

  Scaling
  ~~~~~~~

  Scaling is a lot like cropping, except that the bounding box is always
  centered and its size varies randomly within the given range. For example if
  the scale percentage is zero, then the bounding box is the same size as the
  input and no scaling is applied. If it's 50%, then the bounding box will be in
  a random range between half the width and height and full size.

  Args:
    flip_left_right: Boolean whether to randomly mirror images horizontally.
    random_crop: Integer percentage setting the total margin used around the
    crop box.
    random_scale: Integer percentage of how much to vary the scale by.
    random_brightness: Integer range to randomly multiply the pixel values by.

  Returns:
    The jpeg input layer and the distorted result tensor.
  """
  jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')
  decoded_image = tf.image.decode_jpeg(jpeg_data, channels=MODEL_INPUT_DEPTH)
  decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)
  decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
  # Percentages become multiplicative factors, e.g. random_crop=10 -> 1.1.
  margin_scale = 1.0 + (random_crop / 100.0)
  resize_scale = 1.0 + (random_scale / 100.0)
  margin_scale_value = tf.constant(margin_scale)
  resize_scale_value = tf.random_uniform(tensor_shape.scalar(),
                                         minval=1.0,
                                         maxval=resize_scale)
  scale_value = tf.multiply(margin_scale_value, resize_scale_value)
  precrop_width = tf.multiply(scale_value, MODEL_INPUT_WIDTH)
  precrop_height = tf.multiply(scale_value, MODEL_INPUT_HEIGHT)
  precrop_shape = tf.stack([precrop_height, precrop_width])
  precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)
  # Enlarge the image first, then cut a model-input-sized window at a random
  # position inside it; this implements both the crop margin and the scale.
  precropped_image = tf.image.resize_bilinear(decoded_image_4d,
                                              precrop_shape_as_int)
  precropped_image_3d = tf.squeeze(precropped_image, squeeze_dims=[0])
  cropped_image = tf.random_crop(precropped_image_3d,
                                 [MODEL_INPUT_HEIGHT, MODEL_INPUT_WIDTH,
                                  MODEL_INPUT_DEPTH])
  if flip_left_right:
    flipped_image = tf.image.random_flip_left_right(cropped_image)
  else:
    flipped_image = cropped_image
  # Random per-image brightness multiplier in [1 - b%, 1 + b%].
  brightness_min = 1.0 - (random_brightness / 100.0)
  brightness_max = 1.0 + (random_brightness / 100.0)
  brightness_value = tf.random_uniform(tensor_shape.scalar(),
                                       minval=brightness_min,
                                       maxval=brightness_max)
  brightened_image = tf.multiply(flipped_image, brightness_value)
  distort_result = tf.expand_dims(brightened_image, 0, name='DistortResult')
  return jpeg_data, distort_result
def variable_summaries(var):
  """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
  with tf.name_scope('summaries'):
    mean = tf.reduce_mean(var)
    tf.summary.scalar('mean', mean)
    with tf.name_scope('stddev'):
      stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
    tf.summary.scalar('stddev', stddev)
    tf.summary.scalar('max', tf.reduce_max(var))
    tf.summary.scalar('min', tf.reduce_min(var))
    tf.summary.histogram('histogram', var)
def add_final_training_ops(class_count, final_tensor_name, bottleneck_tensor):
  """Adds a new softmax and fully-connected layer for training.

  We need to retrain the top layer to identify our new classes, so this function
  adds the right operations to the graph, along with some variables to hold the
  weights, and then sets up all the gradients for the backward pass.

  The set up for the softmax and fully-connected layers is based on:
  https://tensorflow.org/versions/master/tutorials/mnist/beginners/index.html

  Args:
    class_count: Integer of how many categories of things we're trying to
    recognize.
    final_tensor_name: Name string for the new final node that produces results.
    bottleneck_tensor: The output of the main CNN graph.

  Returns:
    The tensors for the training and cross entropy results, and tensors for the
    bottleneck input and ground truth input.
  """
  with tf.name_scope('input'):
    # placeholder_with_default lets training feed cached bottlenecks while
    # inference can fall through to the live bottleneck tensor.
    bottleneck_input = tf.placeholder_with_default(
        bottleneck_tensor, shape=[None, BOTTLENECK_TENSOR_SIZE],
        name='BottleneckInputPlaceholder')
    ground_truth_input = tf.placeholder(tf.float32,
                                        [None, class_count],
                                        name='GroundTruthInput')

  # Organizing the following ops as `final_training_ops` so they're easier
  # to see in TensorBoard
  layer_name = 'final_training_ops'
  with tf.name_scope(layer_name):
    with tf.name_scope('weights'):
      layer_weights = tf.Variable(tf.truncated_normal([BOTTLENECK_TENSOR_SIZE, class_count], stddev=0.001), name='final_weights')
      variable_summaries(layer_weights)
    with tf.name_scope('biases'):
      layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')
      variable_summaries(layer_biases)
    with tf.name_scope('Wx_plus_b'):
      logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases
      tf.summary.histogram('pre_activations', logits)

  final_tensor = tf.nn.softmax(logits, name=final_tensor_name)
  tf.summary.histogram('activations', final_tensor)

  with tf.name_scope('cross_entropy'):
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        labels=ground_truth_input, logits=logits)
    with tf.name_scope('total'):
      cross_entropy_mean = tf.reduce_mean(cross_entropy)
  tf.summary.scalar('cross_entropy', cross_entropy_mean)

  with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(FLAGS.learning_rate).minimize(
        cross_entropy_mean)

  return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,
          final_tensor)
def add_evaluation_step(result_tensor, ground_truth_tensor):
  """Inserts the operations we need to evaluate the accuracy of our results.

  Args:
    result_tensor: The new final node that produces results.
    ground_truth_tensor: The node we feed ground truth data
    into.

  Returns:
    Tuple of (evaluation step, prediction).
  """
  with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
      # Compare the argmax of the softmax output against the one-hot labels.
      prediction = tf.argmax(result_tensor, 1)
      correct_prediction = tf.equal(
          prediction, tf.argmax(ground_truth_tensor, 1))
    with tf.name_scope('accuracy'):
      evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  tf.summary.scalar('accuracy', evaluation_step)
  return evaluation_step, prediction
def main(_):
  """Builds the retrain graph, trains the new top layer, and evaluates it."""
  # Setup the directory we'll write summaries to for TensorBoard
  if tf.gfile.Exists(FLAGS.summaries_dir):
    tf.gfile.DeleteRecursively(FLAGS.summaries_dir)
  tf.gfile.MakeDirs(FLAGS.summaries_dir)

  # Set up the pre-trained graph.
  maybe_download_and_extract()
  graph, bottleneck_tensor, jpeg_data_tensor, resized_image_tensor = (
      create_inception_graph())

  # Look at the folder structure, and create lists of all the images.
  image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage,
                                   FLAGS.validation_percentage)
  class_count = len(image_lists.keys())
  if class_count == 0:
    print('No valid folders of images found at ' + FLAGS.image_dir)
    return -1
  if class_count == 1:
    print('Only one valid folder of images found at ' + FLAGS.image_dir +
          ' - multiple classes are needed for classification.')
    return -1

  # See if the command-line flags mean we're applying any distortions.
  do_distort_images = should_distort_images(
      FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
      FLAGS.random_brightness)
  sess = tf.Session()

  if do_distort_images:
    # We will be applying distortions, so setup the operations we'll need.
    distorted_jpeg_data_tensor, distorted_image_tensor = add_input_distortions(
        FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
        FLAGS.random_brightness)
  else:
    # We'll make sure we've calculated the 'bottleneck' image summaries and
    # cached them on disk.
    cache_bottlenecks(sess, image_lists, FLAGS.image_dir, FLAGS.bottleneck_dir,
                      jpeg_data_tensor, bottleneck_tensor)

  # Add the new layer that we'll be training.
  (train_step, cross_entropy, bottleneck_input, ground_truth_input,
   final_tensor) = add_final_training_ops(len(image_lists.keys()),
                                          FLAGS.final_tensor_name,
                                          bottleneck_tensor)

  # Create the operations we need to evaluate the accuracy of our new layer.
  evaluation_step, prediction = add_evaluation_step(
      final_tensor, ground_truth_input)

  # Merge all the summaries and write them out to /tmp/retrain_logs (by default)
  merged = tf.summary.merge_all()
  train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
                                       sess.graph)
  validation_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/validation')

  # Set up all our weights to their initial default values.
  init = tf.global_variables_initializer()
  sess.run(init)

  # Run the training for as many cycles as requested on the command line.
  for i in range(FLAGS.how_many_training_steps):
    # Get a batch of input bottleneck values, either calculated fresh every
    # time with distortions applied, or from the cache stored on disk.
    if do_distort_images:
      train_bottlenecks, train_ground_truth = get_random_distorted_bottlenecks(
          sess, image_lists, FLAGS.train_batch_size, 'training',
          FLAGS.image_dir, distorted_jpeg_data_tensor,
          distorted_image_tensor, resized_image_tensor, bottleneck_tensor)
    else:
      train_bottlenecks, train_ground_truth, _ = get_random_cached_bottlenecks(
          sess, image_lists, FLAGS.train_batch_size, 'training',
          FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
          bottleneck_tensor)
    # Feed the bottlenecks and ground truth into the graph, and run a training
    # step. Capture training summaries for TensorBoard with the `merged` op.
    train_summary, _ = sess.run([merged, train_step],
                                feed_dict={bottleneck_input: train_bottlenecks,
                                           ground_truth_input: train_ground_truth})
    train_writer.add_summary(train_summary, i)

    # Every so often, print out how well the graph is training.
    is_last_step = (i + 1 == FLAGS.how_many_training_steps)
    if (i % FLAGS.eval_step_interval) == 0 or is_last_step:
      train_accuracy, cross_entropy_value = sess.run(
          [evaluation_step, cross_entropy],
          feed_dict={bottleneck_input: train_bottlenecks,
                     ground_truth_input: train_ground_truth})
      print('%s: Step %d: Train accuracy = %.1f%%' % (datetime.now(), i,
                                                      train_accuracy * 100))
      print('%s: Step %d: Cross entropy = %f' % (datetime.now(), i,
                                                 cross_entropy_value))
      validation_bottlenecks, validation_ground_truth, _ = (
          get_random_cached_bottlenecks(
              sess, image_lists, FLAGS.validation_batch_size, 'validation',
              FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
              bottleneck_tensor))
      # Run a validation step and capture training summaries for TensorBoard
      # with the `merged` op.
      validation_summary, validation_accuracy = sess.run(
          [merged, evaluation_step],
          feed_dict={bottleneck_input: validation_bottlenecks,
                     ground_truth_input: validation_ground_truth})
      validation_writer.add_summary(validation_summary, i)
      print('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' %
            (datetime.now(), i, validation_accuracy * 100,
             len(validation_bottlenecks)))

  # We've completed all our training, so run a final test evaluation on
  # some new images we haven't used before.
  test_bottlenecks, test_ground_truth, test_filenames = (
      get_random_cached_bottlenecks(sess, image_lists, FLAGS.test_batch_size,
                                    'testing', FLAGS.bottleneck_dir,
                                    FLAGS.image_dir, jpeg_data_tensor,
                                    bottleneck_tensor))
  test_accuracy, predictions = sess.run(
      [evaluation_step, prediction],
      feed_dict={bottleneck_input: test_bottlenecks,
                 ground_truth_input: test_ground_truth})
  print('Final test accuracy = %.1f%% (N=%d)' % (
      test_accuracy * 100, len(test_bottlenecks)))

  if FLAGS.print_misclassified_test_images:
    print('=== MISCLASSIFIED TEST IMAGES ===')
    for i, test_filename in enumerate(test_filenames):
      if predictions[i] != test_ground_truth[i].argmax():
        print('%70s %s' % (test_filename,
                           list(image_lists.keys())[predictions[i]]))

  # Write out the trained graph and labels with the weights stored as
  # constants.
  output_graph_def = graph_util.convert_variables_to_constants(
      sess, graph.as_graph_def(), [FLAGS.final_tensor_name])
  with gfile.FastGFile(FLAGS.output_graph, 'wb') as f:
    f.write(output_graph_def.SerializeToString())
  with gfile.FastGFile(FLAGS.output_labels, 'w') as f:
    f.write('\n'.join(image_lists.keys()) + '\n')
if __name__ == '__main__':
  # Command-line interface: parse retraining flags into the module-level FLAGS
  # and hand any unparsed arguments back to tf.app.run.
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--image_dir',
      type=str,
      default='',
      help='Path to folders of labeled images.'
  )
  parser.add_argument(
      '--output_graph',
      type=str,
      default='/tmp/output_graph.pb',
      help='Where to save the trained graph.'
  )
  parser.add_argument(
      '--output_labels',
      type=str,
      default='/tmp/output_labels.txt',
      help='Where to save the trained graph\'s labels.'
  )
  parser.add_argument(
      '--summaries_dir',
      type=str,
      default='/tmp/retrain_logs',
      help='Where to save summary logs for TensorBoard.'
  )
  parser.add_argument(
      '--how_many_training_steps',
      type=int,
      default=4000,
      help='How many training steps to run before ending.'
  )
  parser.add_argument(
      '--learning_rate',
      type=float,
      default=0.01,
      help='How large a learning rate to use when training.'
  )
  parser.add_argument(
      '--testing_percentage',
      type=int,
      default=10,
      help='What percentage of images to use as a test set.'
  )
  parser.add_argument(
      '--validation_percentage',
      type=int,
      default=10,
      help='What percentage of images to use as a validation set.'
  )
  parser.add_argument(
      '--eval_step_interval',
      type=int,
      default=10,
      help='How often to evaluate the training results.'
  )
  parser.add_argument(
      '--train_batch_size',
      type=int,
      default=100,
      help='How many images to train on at a time.'
  )
  parser.add_argument(
      '--test_batch_size',
      type=int,
      default=-1,
      help="""\
How many images to test on. This test set is only used once, to evaluate
the final accuracy of the model after training completes.
A value of -1 causes the entire test set to be used, which leads to more
stable results across runs.\
"""
  )
  parser.add_argument(
      '--validation_batch_size',
      type=int,
      default=100,
      help="""\
How many images to use in an evaluation batch. This validation set is
used much more often than the test set, and is an early indicator of how
accurate the model is during training.
A value of -1 causes the entire validation set to be used, which leads to
more stable results across training iterations, but may be slower on large
training sets.\
"""
  )
  parser.add_argument(
      '--print_misclassified_test_images',
      default=False,
      help="""\
Whether to print out a list of all misclassified test images.\
""",
      action='store_true'
  )
  parser.add_argument(
      '--model_dir',
      type=str,
      default='/tmp/imagenet',
      help="""\
Path to classify_image_graph_def.pb,
imagenet_synset_to_human_label_map.txt, and
imagenet_2012_challenge_label_map_proto.pbtxt.\
"""
  )
  parser.add_argument(
      '--bottleneck_dir',
      type=str,
      default='/tmp/bottleneck',
      help='Path to cache bottleneck layer values as files.'
  )
  parser.add_argument(
      '--final_tensor_name',
      type=str,
      default='final_result',
      help="""\
The name of the output classification layer in the retrained graph.\
"""
  )
  parser.add_argument(
      '--flip_left_right',
      default=False,
      help="""\
Whether to randomly flip half of the training images horizontally.\
""",
      action='store_true'
  )
  parser.add_argument(
      '--random_crop',
      type=int,
      default=0,
      help="""\
A percentage determining how much of a margin to randomly crop off the
training images.\
"""
  )
  parser.add_argument(
      '--random_scale',
      type=int,
      default=0,
      help="""\
A percentage determining how much to randomly scale up the size of the
training images by.\
"""
  )
  parser.add_argument(
      '--random_brightness',
      type=int,
      default=0,
      help="""\
A percentage determining how much to randomly multiply the training image
input pixels up or down by.\
"""
  )
  FLAGS, unparsed = parser.parse_known_args()
  # tf.app.run re-parses argv, so forward only the flags we didn't consume.
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
Minhua722/NMF | egs/ar/local/ar_extract_nmf_feats.py | 1 | 3850 | #!/usr/bin/env python
import cv2
import numpy as np
import argparse
import math
import pickle
from sklearn.decomposition import PCA
from nmf_support import *
import sys, os
if __name__ == '__main__':
    # Python 2 script: projects images onto previously-learned NMF bases and
    # pickles the resulting per-image feature vectors for train and test sets.
    #------------------------------------------------------
    # Args parser
    #------------------------------------------------------
    parser = argparse.ArgumentParser(description='Extract PCA coefficients for each image')
    parser.add_argument('--bases_dir', '-base',
                        action='store', type=str, required=True,
                        help='directory of bases (eigen vectors)')
    parser.add_argument('--exp_id', '-id',
                        action='store', type=str, required=True,
                        help='experiment id (related to directory where bases and feats are stored)')
    parser.add_argument('--input_dir', '-in',
                        action='store', type=str, required=True,
                        help='data dir with a list of image filenames and labels for training (extracted features will also be stored here)')
    args = parser.parse_args()

    data_dir = args.input_dir.strip('/')
    train_list = "%s/train.list" % data_dir
    # Bail out silently if the training list is missing.
    if not os.path.isfile(train_list):
        sys.exit(1)
    test_sets = []
    for set_i in range(2, 14):
        test_sets.append("test%d" % set_i)

    bases_dir = "%s/%s/bases" % (args.bases_dir.strip('/'), args.exp_id)
    bases_pname = "%s/bases.pickle" % bases_dir
    if not os.path.isfile(bases_pname):
        sys.exit(1)
    # NOTE(review): feats_dir is assumed to already exist (created by the
    # bases-training step) — writes below fail otherwise; confirm.
    feats_dir = "%s/%s" % (args.input_dir, args.exp_id)

    with open(bases_pname, "rb") as f:
        W = pickle.load(f)  # each col of W is a basis
    D = W.shape[1]  # num of bases (feature dimension)
    print "%d NMF bases loaded from %s" % (D, bases_pname)

    ##########################################################################
    # Extract training data features
    # load img in each col of V
    V_raw, img_height, img_width, train_labels = load_data(train_list)
    V = normalize_data(V_raw)
    train_label_pname = "%s/train_label.pickle" % data_dir
    with open(train_label_pname, "wb") as f:
        pickle.dump(train_labels, f)
    N = V.shape[1]
    #train_coefs_pname = "%s/coefs.pickle" % bases_dir
    #with open(train_coefs_pname, "rb") as f:
    #    H = pickle.load(f)
    #print H.shape
    #assert(H.shape[0] == D and H.shape[1] == N)

    # Project images onto the bases, then mean/variance-normalize each
    # column (i.e. each image's D-dimensional feature vector; axis=0).
    train_feats = np.transpose(np.dot(V.T, W))
    train_feats = train_feats - np.mean(train_feats, axis=0).reshape(1, N)
    train_feats = train_feats / np.std(train_feats, axis=0).reshape(1, N)
    train_feats_pname = "%s/train_feats.pickle" % feats_dir
    with open(train_feats_pname, "wb") as f:
        pickle.dump(train_feats, f)
    #print np.mean(train_feats, axis=0)
    #print np.std(train_feats, axis=0)
    print "train set nmf feats stored in %s" % train_feats_pname

    ############################################################################
    # Extract test data features
    for set_name in test_sets:
        test_list = "%s/%s.list" % (data_dir, set_name)
        print "Process %s" % test_list
        # load img in each col of V
        V_raw, img_height, img_width, test_labels = load_data(test_list)
        V = normalize_data(V_raw)
        test_label_pname = "%s/%s_label.pickle" % (data_dir, set_name)
        with open(test_label_pname, "wb") as f:
            pickle.dump(test_labels, f)
        N = V.shape[1]
        print "%d test images of size %dx%d loaded" % (N, img_height, img_width)

        test_feats = np.transpose(np.dot(V.T, W))  # each col is nmf feats for one image
        assert(test_feats.shape[0] == D and test_feats.shape[1] == N)
        # mean and variance normalization for each col
        test_feats = test_feats - np.mean(test_feats, axis=0).reshape(1, N)
        test_feats = test_feats / np.std(test_feats, axis=0).reshape(1, N)
        test_feats_pname = "%s/%s_feats.pickle" % (feats_dir, set_name)
        with open(test_feats_pname, "wb") as f:
            pickle.dump(test_feats, f)
        #print np.mean(test_feats, axis=0)
        #print np.std(test_feats, axis=0)
        print "%s nmf feats stored in %s" % (set_name, test_feats_pname)
| apache-2.0 |
CubicERP/geraldo | site/newsite/site-geraldo/django/contrib/webdesign/lorem_ipsum.py | 439 | 4872 | """
Utility functions for generating "lorem ipsum" Latin text.
"""
import random
# The canonical first paragraph, returned verbatim when callers ask for
# "common" lorem ipsum text (see paragraphs() below).
COMMON_P = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'

# Pool of Latin words that sentence()/words() sample from at random.
WORDS = ('exercitationem', 'perferendis', 'perspiciatis', 'laborum', 'eveniet',
         'sunt', 'iure', 'nam', 'nobis', 'eum', 'cum', 'officiis', 'excepturi',
         'odio', 'consectetur', 'quasi', 'aut', 'quisquam', 'vel', 'eligendi',
         'itaque', 'non', 'odit', 'tempore', 'quaerat', 'dignissimos',
         'facilis', 'neque', 'nihil', 'expedita', 'vitae', 'vero', 'ipsum',
         'nisi', 'animi', 'cumque', 'pariatur', 'velit', 'modi', 'natus',
         'iusto', 'eaque', 'sequi', 'illo', 'sed', 'ex', 'et', 'voluptatibus',
         'tempora', 'veritatis', 'ratione', 'assumenda', 'incidunt', 'nostrum',
         'placeat', 'aliquid', 'fuga', 'provident', 'praesentium', 'rem',
         'necessitatibus', 'suscipit', 'adipisci', 'quidem', 'possimus',
         'voluptas', 'debitis', 'sint', 'accusantium', 'unde', 'sapiente',
         'voluptate', 'qui', 'aspernatur', 'laudantium', 'soluta', 'amet',
         'quo', 'aliquam', 'saepe', 'culpa', 'libero', 'ipsa', 'dicta',
         'reiciendis', 'nesciunt', 'doloribus', 'autem', 'impedit', 'minima',
         'maiores', 'repudiandae', 'ipsam', 'obcaecati', 'ullam', 'enim',
         'totam', 'delectus', 'ducimus', 'quis', 'voluptates', 'dolores',
         'molestiae', 'harum', 'dolorem', 'quia', 'voluptatem', 'molestias',
         'magni', 'distinctio', 'omnis', 'illum', 'dolorum', 'voluptatum', 'ea',
         'quas', 'quam', 'corporis', 'quae', 'blanditiis', 'atque', 'deserunt',
         'laboriosam', 'earum', 'consequuntur', 'hic', 'cupiditate',
         'quibusdam', 'accusamus', 'ut', 'rerum', 'error', 'minus', 'eius',
         'ab', 'ad', 'nemo', 'fugit', 'officia', 'at', 'in', 'id', 'quos',
         'reprehenderit', 'numquam', 'iste', 'fugiat', 'sit', 'inventore',
         'beatae', 'repellendus', 'magnam', 'recusandae', 'quod', 'explicabo',
         'doloremque', 'aperiam', 'consequatur', 'asperiores', 'commodi',
         'optio', 'dolor', 'labore', 'temporibus', 'repellat', 'veniam',
         'architecto', 'est', 'esse', 'mollitia', 'nulla', 'a', 'similique',
         'eos', 'alias', 'dolore', 'tenetur', 'deleniti', 'porro', 'facere',
         'maxime', 'corrupti')

# The first 19 words of the standard "lorem ipsum" passage, used by
# words() when common=True.
COMMON_WORDS = ('lorem', 'ipsum', 'dolor', 'sit', 'amet', 'consectetur',
                'adipisicing', 'elit', 'sed', 'do', 'eiusmod', 'tempor', 'incididunt',
                'ut', 'labore', 'et', 'dolore', 'magna', 'aliqua')
def sentence():
    """
    Returns a randomly generated sentence of lorem ipsum text.

    The first word is capitalized, and the sentence ends in either a period or
    question mark. Commas are added at random.
    """
    # Pick 1-5 comma-separated sections, each made of 3-12 random words.
    num_sections = random.randint(1, 5)
    sections = []
    for _ in range(num_sections):
        word_count = random.randint(3, 12)
        sections.append(u' '.join(random.sample(WORDS, word_count)))
    body = u', '.join(sections)
    # Sentence case the result and close with '?' or '.'.
    return u'%s%s%s' % (body[0].upper(), body[1:], random.choice('?.'))
def paragraph():
    """
    Returns a randomly generated paragraph of lorem ipsum text.

    The paragraph consists of between 1 and 4 sentences, inclusive.
    """
    sentence_count = random.randint(1, 4)
    return u' '.join(sentence() for _ in range(sentence_count))
def paragraphs(count, common=True):
    """
    Returns a list of paragraphs as returned by paragraph().

    If `common` is True, then the first paragraph will be the standard
    'lorem ipsum' paragraph. Otherwise, the first paragraph will be random
    Latin text. Either way, subsequent paragraphs will be random Latin text.
    """
    result = []
    for index in range(count):
        # Only the very first paragraph may use the canonical text.
        if common and index == 0:
            result.append(COMMON_P)
        else:
            result.append(paragraph())
    return result
def words(count, common=True):
    """
    Returns a string of `count` lorem ipsum words separated by a single space.

    If `common` is True, then the first 19 words will be the standard
    'lorem ipsum' words. Otherwise, all words will be selected randomly.
    """
    word_list = list(COMMON_WORDS) if common else []
    if count <= len(word_list):
        # The common prefix already covers the request; truncate it.
        return u' '.join(word_list[:count])
    # Top up with random samples; each pass draws at most len(WORDS)
    # distinct words, so repeat until enough words are collected.
    remaining = count - len(word_list)
    while remaining > 0:
        take = min(remaining, len(WORDS))
        word_list += random.sample(WORDS, take)
        remaining -= take
    return u' '.join(word_list)
| lgpl-3.0 |
pombredanne/btnet | beautifulsoup4-4.3.2/bs4/builder/_htmlparser.py | 412 | 8839 | """Use the HTMLParser library to parse HTML files that aren't too bad."""
__all__ = [
'HTMLParserTreeBuilder',
]
from HTMLParser import (
HTMLParser,
HTMLParseError,
)
import sys
import warnings
# Starting in Python 3.2, the HTMLParser constructor takes a 'strict'
# argument, which we'd like to set to False. Unfortunately,
# http://bugs.python.org/issue13273 makes strict=True a better bet
# before Python 3.2.3.
#
# At the end of this file, we monkeypatch HTMLParser so that
# strict=True works well on Python 3.2.2.
# (major, minor, micro) of the running interpreter.
major, minor, release = sys.version_info[:3]
# True when HTMLParser's constructor usefully accepts strict=False; that is
# safe from Python 3.2.3 onwards (see the comment above about issue 13273).
CONSTRUCTOR_TAKES_STRICT = (
    major > 3
    or (major == 3 and minor > 2)
    or (major == 3 and minor == 2 and release >= 3))
from bs4.element import (
CData,
Comment,
Declaration,
Doctype,
ProcessingInstruction,
)
from bs4.dammit import EntitySubstitution, UnicodeDammit
from bs4.builder import (
HTML,
HTMLTreeBuilder,
STRICT,
)
HTMLPARSER = 'html.parser'
class BeautifulSoupHTMLParser(HTMLParser):
def handle_starttag(self, name, attrs):
# XXX namespace
attr_dict = {}
for key, value in attrs:
# Change None attribute values to the empty string
# for consistency with the other tree builders.
if value is None:
value = ''
attr_dict[key] = value
attrvalue = '""'
self.soup.handle_starttag(name, None, None, attr_dict)
def handle_endtag(self, name):
self.soup.handle_endtag(name)
def handle_data(self, data):
self.soup.handle_data(data)
def handle_charref(self, name):
# XXX workaround for a bug in HTMLParser. Remove this once
# it's fixed.
if name.startswith('x'):
real_name = int(name.lstrip('x'), 16)
elif name.startswith('X'):
real_name = int(name.lstrip('X'), 16)
else:
real_name = int(name)
try:
data = unichr(real_name)
except (ValueError, OverflowError), e:
data = u"\N{REPLACEMENT CHARACTER}"
self.handle_data(data)
def handle_entityref(self, name):
character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name)
if character is not None:
data = character
else:
data = "&%s;" % name
self.handle_data(data)
def handle_comment(self, data):
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(Comment)
def handle_decl(self, data):
self.soup.endData()
if data.startswith("DOCTYPE "):
data = data[len("DOCTYPE "):]
elif data == 'DOCTYPE':
# i.e. "<!DOCTYPE>"
data = ''
self.soup.handle_data(data)
self.soup.endData(Doctype)
def unknown_decl(self, data):
if data.upper().startswith('CDATA['):
cls = CData
data = data[len('CDATA['):]
else:
cls = Declaration
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(cls)
def handle_pi(self, data):
self.soup.endData()
if data.endswith("?") and data.lower().startswith("xml"):
# "An XHTML processing instruction using the trailing '?'
# will cause the '?' to be included in data." - HTMLParser
# docs.
#
# Strip the question mark so we don't end up with two
# question marks.
data = data[:-1]
self.soup.handle_data(data)
self.soup.endData(ProcessingInstruction)
class HTMLParserTreeBuilder(HTMLTreeBuilder):
    """Tree builder backed by Python's built-in HTMLParser module."""

    is_xml = False
    features = [HTML, STRICT, HTMLPARSER]

    def __init__(self, *args, **kwargs):
        # Ask for lenient parsing where the interpreter supports it; on
        # older interpreters strict=True is the safer option (see the
        # module-level comment about issue 13273).
        if CONSTRUCTOR_TAKES_STRICT:
            kwargs['strict'] = False
        # Stored and replayed by feed(), so each document gets a fresh
        # parser instance.
        self.parser_args = (args, kwargs)

    def prepare_markup(self, markup, user_specified_encoding=None,
                       document_declared_encoding=None):
        """
        :return: A 4-tuple (markup, original encoding, encoding
        declared within markup, whether any characters had to be
        replaced with REPLACEMENT CHARACTER).
        """
        if isinstance(markup, unicode):
            # Already decoded text; nothing to detect.
            yield (markup, None, None, False)
            return

        # Let UnicodeDammit guess the encoding, preferring any hints we got.
        try_encodings = [user_specified_encoding, document_declared_encoding]
        dammit = UnicodeDammit(markup, try_encodings, is_html=True)
        yield (dammit.markup, dammit.original_encoding,
               dammit.declared_html_encoding,
               dammit.contains_replacement_characters)

    def feed(self, markup):
        args, kwargs = self.parser_args
        parser = BeautifulSoupHTMLParser(*args, **kwargs)
        parser.soup = self.soup
        try:
            parser.feed(markup)
        except HTMLParseError, e:
            # Point the user at an external parser before re-raising.
            warnings.warn(RuntimeWarning(
                "Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help."))
            raise e
# Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some
# 3.2.3 code. This ensures they don't treat markup like <p></p> as a
# string.
#
# XXX This code can be removed once most Python 3 users are on 3.2.3.
if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT:
    import re
    # More tolerant attribute-matching regex backported from Python 3.2.3.
    attrfind_tolerant = re.compile(
        r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*'
        r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?')
    HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant

    locatestarttagend = re.compile(r"""
  <[a-zA-Z][-.a-zA-Z0-9:_]*          # tag name
  (?:\s+                             # whitespace before attribute name
    (?:[a-zA-Z_][-.:a-zA-Z0-9_]*     # attribute name
      (?:\s*=\s*                     # value indicator
        (?:'[^']*'                   # LITA-enclosed value
          |\"[^\"]*\"                # LIT-enclosed value
          |[^'\">\s]+                # bare value
         )
       )?
     )
   )*
  \s*                                # trailing whitespace
""", re.VERBOSE)
    BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend

    from html.parser import tagfind, attrfind

    def parse_starttag(self, i):
        # Backport of Python 3.2.3's HTMLParser.parse_starttag.
        # NOTE(review): this function is defined at module level, so
        # ``self.__starttag_text`` is NOT name-mangled here -- confirm this
        # matches how HTMLParser itself stores the attribute.
        self.__starttag_text = None
        endpos = self.check_for_whole_start_tag(i)
        if endpos < 0:
            return endpos
        rawdata = self.rawdata
        self.__starttag_text = rawdata[i:endpos]

        # Now parse the data between i+1 and j into a tag and attrs
        attrs = []
        match = tagfind.match(rawdata, i+1)
        assert match, 'unexpected call to parse_starttag()'
        k = match.end()
        self.lasttag = tag = rawdata[i+1:k].lower()
        while k < endpos:
            # Choose the strict or tolerant attribute regex.
            if self.strict:
                m = attrfind.match(rawdata, k)
            else:
                m = attrfind_tolerant.match(rawdata, k)
            if not m:
                break
            attrname, rest, attrvalue = m.group(1, 2, 3)
            if not rest:
                attrvalue = None
            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
                 attrvalue[:1] == '"' == attrvalue[-1:]:
                # Strip the surrounding quotes.
                attrvalue = attrvalue[1:-1]
            if attrvalue:
                attrvalue = self.unescape(attrvalue)
            attrs.append((attrname.lower(), attrvalue))
            k = m.end()

        end = rawdata[k:endpos].strip()
        if end not in (">", "/>"):
            # Malformed start tag: report position-accurate junk.
            lineno, offset = self.getpos()
            if "\n" in self.__starttag_text:
                lineno = lineno + self.__starttag_text.count("\n")
                offset = len(self.__starttag_text) \
                         - self.__starttag_text.rfind("\n")
            else:
                offset = offset + len(self.__starttag_text)
            if self.strict:
                self.error("junk characters in start tag: %r"
                           % (rawdata[k:endpos][:20],))
            self.handle_data(rawdata[i:endpos])
            return endpos
        if end.endswith('/>'):
            # XHTML-style empty tag: <span attr="value" />
            self.handle_startendtag(tag, attrs)
        else:
            self.handle_starttag(tag, attrs)
            if tag in self.CDATA_CONTENT_ELEMENTS:
                self.set_cdata_mode(tag)
        return endpos

    def set_cdata_mode(self, elem):
        # Treat everything up to the matching close tag as raw character data.
        self.cdata_elem = elem.lower()
        self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)

    BeautifulSoupHTMLParser.parse_starttag = parse_starttag
    BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode

    CONSTRUCTOR_TAKES_STRICT = True
| gpl-2.0 |
programadorjc/django | tests/forms_tests/tests/test_regressions.py | 155 | 8024 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.forms import (
CharField, ChoiceField, Form, HiddenInput, IntegerField, ModelForm,
ModelMultipleChoiceField, MultipleChoiceField, RadioSelect, Select,
TextInput,
)
from django.test import TestCase, ignore_warnings
from django.utils import translation
from django.utils.translation import gettext_lazy, ugettext_lazy
from ..models import Cheese
class FormsRegressionsTestCase(TestCase):
    """Regression tests guarding against previously fixed django.forms bugs."""

    def test_class(self):
        # Tests to prevent against recurrences of earlier bugs.
        extra_attrs = {'class': 'special'}

        class TestForm(Form):
            f1 = CharField(max_length=10, widget=TextInput(attrs=extra_attrs))
            f2 = CharField(widget=TextInput(attrs=extra_attrs))

        self.assertHTMLEqual(TestForm(auto_id=False).as_p(), '<p>F1: <input type="text" class="special" name="f1" maxlength="10" /></p>\n<p>F2: <input type="text" class="special" name="f2" /></p>')

    def test_regression_3600(self):
        # Tests for form i18n #
        # There were some problems with form translations in #3600

        class SomeForm(Form):
            username = CharField(max_length=10, label=ugettext_lazy('username'))

        f = SomeForm()
        self.assertHTMLEqual(f.as_p(), '<p><label for="id_username">username:</label> <input id="id_username" type="text" name="username" maxlength="10" /></p>')

        # Translations are done at rendering time, so multi-lingual apps can define forms)
        with translation.override('de'):
            self.assertHTMLEqual(f.as_p(), '<p><label for="id_username">Benutzername:</label> <input id="id_username" type="text" name="username" maxlength="10" /></p>')
        with translation.override('pl'):
            self.assertHTMLEqual(f.as_p(), '<p><label for="id_username">u\u017cytkownik:</label> <input id="id_username" type="text" name="username" maxlength="10" /></p>')

    def test_regression_5216(self):
        # There was some problems with form translations in #5216

        class SomeForm(Form):
            field_1 = CharField(max_length=10, label=ugettext_lazy('field_1'))
            field_2 = CharField(max_length=10, label=ugettext_lazy('field_2'), widget=TextInput(attrs={'id': 'field_2_id'}))

        f = SomeForm()
        self.assertHTMLEqual(f['field_1'].label_tag(), '<label for="id_field_1">field_1:</label>')
        self.assertHTMLEqual(f['field_2'].label_tag(), '<label for="field_2_id">field_2:</label>')

        # Unicode decoding problems...
        GENDERS = (('\xc5', 'En tied\xe4'), ('\xf8', 'Mies'), ('\xdf', 'Nainen'))

        class SomeForm(Form):
            somechoice = ChoiceField(choices=GENDERS, widget=RadioSelect(), label='\xc5\xf8\xdf')

        f = SomeForm()
        self.assertHTMLEqual(f.as_p(), '<p><label for="id_somechoice_0">\xc5\xf8\xdf:</label> <ul id="id_somechoice">\n<li><label for="id_somechoice_0"><input type="radio" id="id_somechoice_0" value="\xc5" name="somechoice" /> En tied\xe4</label></li>\n<li><label for="id_somechoice_1"><input type="radio" id="id_somechoice_1" value="\xf8" name="somechoice" /> Mies</label></li>\n<li><label for="id_somechoice_2"><input type="radio" id="id_somechoice_2" value="\xdf" name="somechoice" /> Nainen</label></li>\n</ul></p>')

        # Translated error messages used to be buggy.
        with translation.override('ru'):
            f = SomeForm({})
            self.assertHTMLEqual(f.as_p(), '<ul class="errorlist"><li>\u041e\u0431\u044f\u0437\u0430\u0442\u0435\u043b\u044c\u043d\u043e\u0435 \u043f\u043e\u043b\u0435.</li></ul>\n<p><label for="id_somechoice_0">\xc5\xf8\xdf:</label> <ul id="id_somechoice">\n<li><label for="id_somechoice_0"><input type="radio" id="id_somechoice_0" value="\xc5" name="somechoice" /> En tied\xe4</label></li>\n<li><label for="id_somechoice_1"><input type="radio" id="id_somechoice_1" value="\xf8" name="somechoice" /> Mies</label></li>\n<li><label for="id_somechoice_2"><input type="radio" id="id_somechoice_2" value="\xdf" name="somechoice" /> Nainen</label></li>\n</ul></p>')

        # Deep copying translated text shouldn't raise an error)
        class CopyForm(Form):
            degree = IntegerField(widget=Select(choices=((1, gettext_lazy('test')),)))

        f = CopyForm()

    @ignore_warnings(category=UnicodeWarning)
    def test_regression_5216_b(self):
        # Testing choice validation with UTF-8 bytestrings as input (these are the
        # Russian abbreviations "мес." and "шт.".
        UNITS = ((b'\xd0\xbc\xd0\xb5\xd1\x81.', b'\xd0\xbc\xd0\xb5\xd1\x81.'),
                 (b'\xd1\x88\xd1\x82.', b'\xd1\x88\xd1\x82.'))
        f = ChoiceField(choices=UNITS)
        self.assertEqual(f.clean('\u0448\u0442.'), '\u0448\u0442.')
        self.assertEqual(f.clean(b'\xd1\x88\xd1\x82.'), '\u0448\u0442.')

    def test_misc(self):
        # There once was a problem with Form fields called "data". Let's make sure that
        # doesn't come back.
        class DataForm(Form):
            data = CharField(max_length=10)

        f = DataForm({'data': 'xyzzy'})
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data, {'data': 'xyzzy'})

        # A form with *only* hidden fields that has errors is going to be very unusual.
        class HiddenForm(Form):
            data = IntegerField(widget=HiddenInput)

        f = HiddenForm({})
        self.assertHTMLEqual(f.as_p(), '<ul class="errorlist nonfield"><li>(Hidden field data) This field is required.</li></ul>\n<p> <input type="hidden" name="data" id="id_data" /></p>')
        self.assertHTMLEqual(f.as_table(), '<tr><td colspan="2"><ul class="errorlist nonfield"><li>(Hidden field data) This field is required.</li></ul><input type="hidden" name="data" id="id_data" /></td></tr>')

    def test_xss_error_messages(self):
        ###################################################
        # Tests for XSS vulnerabilities in error messages #
        ###################################################

        # The forms layer doesn't escape input values directly because error messages
        # might be presented in non-HTML contexts. Instead, the message is just marked
        # for escaping by the template engine. So we'll need to construct a little
        # silly template to trigger the escaping.
        from django.template import Template, Context
        t = Template('{{ form.errors }}')

        class SomeForm(Form):
            field = ChoiceField(choices=[('one', 'One')])

        f = SomeForm({'field': '<script>'})
        self.assertHTMLEqual(t.render(Context({'form': f})), '<ul class="errorlist"><li>field<ul class="errorlist"><li>Select a valid choice. <script> is not one of the available choices.</li></ul></li></ul>')

        class SomeForm(Form):
            field = MultipleChoiceField(choices=[('one', 'One')])

        f = SomeForm({'field': ['<script>']})
        self.assertHTMLEqual(t.render(Context({'form': f})), '<ul class="errorlist"><li>field<ul class="errorlist"><li>Select a valid choice. <script> is not one of the available choices.</li></ul></li></ul>')

        from forms_tests.models import ChoiceModel

        class SomeForm(Form):
            field = ModelMultipleChoiceField(ChoiceModel.objects.all())

        f = SomeForm({'field': ['<script>']})
        self.assertHTMLEqual(t.render(Context({'form': f})), '<ul class="errorlist"><li>field<ul class="errorlist"><li>"<script>" is not a valid value for a primary key.</li></ul></li></ul>')

    def test_regression_14234(self):
        """
        Re-cleaning an instance that was added via a ModelForm should not raise
        a pk uniqueness error.
        """
        class CheeseForm(ModelForm):
            class Meta:
                model = Cheese
                fields = '__all__'

        form = CheeseForm({
            'name': 'Brie',
        })
        self.assertTrue(form.is_valid())
        obj = form.save()
        obj.name = 'Camembert'
        obj.full_clean()
| bsd-3-clause |
kangfend/django | tests/introspection/tests.py | 207 | 8471 | from __future__ import unicode_literals
from unittest import skipUnless
from django.db import connection
from django.db.utils import DatabaseError
from django.test import TransactionTestCase, mock, skipUnlessDBFeature
from .models import Article, Reporter
class IntrospectionTests(TransactionTestCase):
    """Exercises the database backend introspection API against real tables."""

    available_apps = ['introspection']

    def test_table_names(self):
        tl = connection.introspection.table_names()
        # table_names() is documented to return a sorted list.
        self.assertEqual(tl, sorted(tl))
        self.assertIn(Reporter._meta.db_table, tl,
                      "'%s' isn't in table_list()." % Reporter._meta.db_table)
        self.assertIn(Article._meta.db_table, tl,
                      "'%s' isn't in table_list()." % Article._meta.db_table)

    def test_django_table_names(self):
        with connection.cursor() as cursor:
            # A raw table created outside the ORM must not be reported.
            cursor.execute('CREATE TABLE django_ixn_test_table (id INTEGER);')
            tl = connection.introspection.django_table_names()
            cursor.execute("DROP TABLE django_ixn_test_table;")
            self.assertNotIn('django_ixn_test_table', tl,
                             "django_table_names() returned a non-Django table")

    def test_django_table_names_retval_type(self):
        # Table name is a list #15216
        tl = connection.introspection.django_table_names(only_existing=True)
        self.assertIs(type(tl), list)
        tl = connection.introspection.django_table_names(only_existing=False)
        self.assertIs(type(tl), list)

    def test_table_names_with_views(self):
        with connection.cursor() as cursor:
            try:
                cursor.execute(
                    'CREATE VIEW introspection_article_view AS SELECT headline '
                    'from introspection_article;')
            except DatabaseError as e:
                if 'insufficient privileges' in str(e):
                    self.fail("The test user has no CREATE VIEW privileges")
                else:
                    raise
        # Views are reported only when explicitly requested.
        self.assertIn('introspection_article_view',
                      connection.introspection.table_names(include_views=True))
        self.assertNotIn('introspection_article_view',
                         connection.introspection.table_names())

    def test_installed_models(self):
        tables = [Article._meta.db_table, Reporter._meta.db_table]
        models = connection.introspection.installed_models(tables)
        self.assertEqual(models, {Article, Reporter})

    def test_sequence_list(self):
        sequences = connection.introspection.sequence_list()
        expected = {'table': Reporter._meta.db_table, 'column': 'id'}
        self.assertIn(expected, sequences,
                      'Reporter sequence not found in sequence_list()')

    def test_get_table_description_names(self):
        with connection.cursor() as cursor:
            desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
        self.assertEqual([r[0] for r in desc],
                         [f.column for f in Reporter._meta.fields])

    def test_get_table_description_types(self):
        with connection.cursor() as cursor:
            desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
        # Expected field types degrade gracefully on backends that cannot
        # introspect the more specific column types.
        self.assertEqual(
            [datatype(r[1], r) for r in desc],
            ['AutoField' if connection.features.can_introspect_autofield else 'IntegerField',
             'CharField', 'CharField', 'CharField',
             'BigIntegerField' if connection.features.can_introspect_big_integer_field else 'IntegerField',
             'BinaryField' if connection.features.can_introspect_binary_field else 'TextField',
             'SmallIntegerField' if connection.features.can_introspect_small_integer_field else 'IntegerField']
        )

    # The following test fails on Oracle due to #17202 (can't correctly
    # inspect the length of character columns).
    @skipUnlessDBFeature('can_introspect_max_length')
    def test_get_table_description_col_lengths(self):
        with connection.cursor() as cursor:
            desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
        self.assertEqual(
            [r[3] for r in desc if datatype(r[1], r) == 'CharField'],
            [30, 30, 254]
        )

    @skipUnlessDBFeature('can_introspect_null')
    def test_get_table_description_nullable(self):
        with connection.cursor() as cursor:
            desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
        # Oracle-style backends report empty strings as NULL.
        nullable_by_backend = connection.features.interprets_empty_strings_as_nulls
        self.assertEqual(
            [r[6] for r in desc],
            [False, nullable_by_backend, nullable_by_backend, nullable_by_backend, True, True, False]
        )

    # Regression test for #9991 - 'real' types in postgres
    @skipUnlessDBFeature('has_real_datatype')
    def test_postgresql_real_type(self):
        with connection.cursor() as cursor:
            cursor.execute("CREATE TABLE django_ixn_real_test_table (number REAL);")
            desc = connection.introspection.get_table_description(cursor, 'django_ixn_real_test_table')
            cursor.execute('DROP TABLE django_ixn_real_test_table;')
        self.assertEqual(datatype(desc[0][1], desc[0]), 'FloatField')

    @skipUnlessDBFeature('can_introspect_foreign_keys')
    def test_get_relations(self):
        with connection.cursor() as cursor:
            relations = connection.introspection.get_relations(cursor, Article._meta.db_table)

        # That's {field_name: (field_name_other_table, other_table)}
        expected_relations = {
            'reporter_id': ('id', Reporter._meta.db_table),
            'response_to_id': ('id', Article._meta.db_table),
        }
        self.assertEqual(relations, expected_relations)

        # Removing a field shouldn't disturb get_relations (#17785)
        body = Article._meta.get_field('body')
        with connection.schema_editor() as editor:
            editor.remove_field(Article, body)
        with connection.cursor() as cursor:
            relations = connection.introspection.get_relations(cursor, Article._meta.db_table)
        with connection.schema_editor() as editor:
            editor.add_field(Article, body)
        self.assertEqual(relations, expected_relations)

    @skipUnless(connection.vendor == 'sqlite', "This is an sqlite-specific issue")
    def test_get_relations_alt_format(self):
        """With SQLite, foreign keys can be added with different syntaxes."""
        with connection.cursor() as cursor:
            # Fake the sqlite_master DDL returned for the table.
            cursor.fetchone = mock.Mock(return_value=[
                "CREATE TABLE track(id, art_id INTEGER, FOREIGN KEY(art_id) REFERENCES %s(id));" % Article._meta.db_table
            ])
            relations = connection.introspection.get_relations(cursor, 'mocked_table')
        self.assertEqual(relations, {'art_id': ('id', Article._meta.db_table)})

    @skipUnlessDBFeature('can_introspect_foreign_keys')
    def test_get_key_columns(self):
        with connection.cursor() as cursor:
            key_columns = connection.introspection.get_key_columns(cursor, Article._meta.db_table)
        self.assertEqual(
            set(key_columns),
            {('reporter_id', Reporter._meta.db_table, 'id'),
             ('response_to_id', Article._meta.db_table, 'id')})

    def test_get_primary_key_column(self):
        with connection.cursor() as cursor:
            primary_key_column = connection.introspection.get_primary_key_column(cursor, Article._meta.db_table)
        self.assertEqual(primary_key_column, 'id')

    def test_get_indexes(self):
        with connection.cursor() as cursor:
            indexes = connection.introspection.get_indexes(cursor, Article._meta.db_table)
        self.assertEqual(indexes['reporter_id'], {'unique': False, 'primary_key': False})

    def test_get_indexes_multicol(self):
        """
        Test that multicolumn indexes are not included in the introspection
        results.
        """
        with connection.cursor() as cursor:
            indexes = connection.introspection.get_indexes(cursor, Reporter._meta.db_table)
        self.assertNotIn('first_name', indexes)
        self.assertIn('id', indexes)
def datatype(dbtype, description):
    """Helper to convert a data type into a string."""
    field_type = connection.introspection.get_field_type(dbtype, description)
    # get_field_type() may return (name, params); only the name matters here.
    return field_type[0] if type(field_type) is tuple else field_type
| bsd-3-clause |
qPCR4vir/orange | Orange/projection/mds.py | 6 | 14713 | """
.. index:: multidimensional scaling (mds)
.. index::
single: projection; multidimensional scaling (mds)
**********************************
Multidimensional scaling (``mds``)
**********************************
The functionality to perform multidimensional scaling
(http://en.wikipedia.org/wiki/Multidimensional_scaling).
The main class to perform multidimensional scaling is
:class:`Orange.projection.mds.MDS`
.. autoclass:: Orange.projection.mds.MDS
:members:
:exclude-members: Torgerson, get_distance, get_stress, calc_stress, run
.. automethod:: calc_stress(stress_func=SgnRelStress)
.. automethod:: run(iter, stress_func=SgnRelStress, eps=1e-3, progress_callback=None)
Stress functions
================
Stress functions that can be used for MDS have to be implemented as functions
or callable classes:
.. method:: \ __call__(correct, current, weight=1.0)
Compute the stress using the correct and the current distance value (the
:obj:`Orange.projection.mds.MDS.distances` and
:obj:`Orange.projection.mds.MDS.projected_distances` elements).
:param correct: correct (actual) distance between elements, represented by
the two points.
:type correct: float
:param current: current distance between the points in the MDS space.
:type current: float
This module provides the following stress functions:
* :obj:`SgnRelStress`
* :obj:`KruskalStress`
* :obj:`SammonStress`
* :obj:`SgnSammonStress`
Examples
========
MDS Scatterplot
---------------
The following script computes the Euclidean distance between the data
instances and runs MDS. Final coordinates are plotted with matplotlib
(not included with orange, http://matplotlib.sourceforge.net/).
Example (:download:`mds-scatterplot.py <code/mds-scatterplot.py>`)
.. literalinclude:: code/mds-scatterplot.py
:lines: 7-
The script produces a file *mds-scatterplot.py.png*. Color denotes
the class. Iris is a relatively simple data set with respect to
classification; to no surprise we see that MDS finds such instance
placement in 2D where instances of different classes are well separated.
Note that MDS has no knowledge of points' classes.
.. image:: files/mds-scatterplot.png
A more advanced example
-----------------------
The following script performs 10 steps of Smacof optimization before computing
the stress. This is suitable if you have a large dataset and want to save some
time.
Example (:download:`mds-advanced.py <code/mds-advanced.py>`)
.. literalinclude:: code/mds-advanced.py
:lines: 7-
A few representative lines of the output are::
<-0.633911848068, 0.112218663096> [5.1, 3.5, 1.4, 0.2, 'Iris-setosa']
<-0.624193906784, -0.111143872142> [4.9, 3.0, 1.4, 0.2, 'Iris-setosa']
...
<0.265250980854, 0.237793982029> [7.0, 3.2, 4.7, 1.4, 'Iris-versicolor']
<0.208580598235, 0.116296850145> [6.4, 3.2, 4.5, 1.5, 'Iris-versicolor']
...
<0.635814905167, 0.238721415401> [6.3, 3.3, 6.0, 2.5, 'Iris-virginica']
<0.356859534979, -0.175976261497> [5.8, 2.7, 5.1, 1.9, 'Iris-virginica']
...
"""
import numpy
from numpy.linalg import svd
import Orange.core
from Orange import orangeom as orangemds
from Orange.utils import deprecated_keywords
from Orange.utils import deprecated_members
KruskalStress = orangemds.KruskalStress()
SammonStress = orangemds.SammonStress()
SgnSammonStress = orangemds.SgnSammonStress()
SgnRelStress = orangemds.SgnRelStress()
PointList = Orange.core.FloatListList
FloatListList = Orange.core.FloatListList
def _mycompare((a,aa),(b,bb)):
if a == b:
return 0
if a < b:
return -1
else:
return 1
class PivotMDS(object):
    """Pivot-based approximate multidimensional scaling.

    Instead of double-centering the full n x n distance matrix as classical
    (Torgerson) MDS does, only the distances to ``k`` pivot elements are
    used, which scales to much larger data sets.

    :param distances: symmetric distance matrix (any iterable of rows).
    :param pivots: either the number of pivots to sample at random, or an
        explicit list of pivot indices.
    :param dim: accepted for interface compatibility; the embedding
        produced by :meth:`optimize` is always 2D.
    """

    def __init__(self, distances=None, pivots=50, dim=2, **kwargs):
        self.dst = numpy.array([m for m in distances])
        self.n = len(self.dst)

        if isinstance(pivots, int):
            # Sample `pivots` distinct indices at random.
            self.k = pivots
            self.pivots = numpy.random.permutation(len(self.dst))[:pivots]
            #self.pivots.sort()
        elif isinstance(pivots, list):
            # Caller supplied explicit pivot indices.
            self.pivots = pivots
            #self.pivots.sort()
            self.k = len(self.pivots)
        else:
            raise AttributeError('pivots')

    def optimize(self):
        """Compute the embedding; returns the (x, y) coordinate arrays."""
        # n x k matrix of distances to the pivot elements.
        # NOTE: the original indexed with dst[[self.pivots]], which relies
        # on non-tuple sequence indexing that is deprecated/removed in
        # modern NumPy; dst[self.pivots] is the supported equivalent.
        d = self.dst[self.pivots].T
        C = d ** 2

        # Double-center d.
        cavg = numpy.sum(d, axis=0) / (self.k + 0.0)    # column sum
        ravg = numpy.sum(d, axis=1) / (self.n + 0.0)    # row sum
        tavg = numpy.sum(cavg) / (self.n + 0.0)         # total sum
        # Vectorized form of the original per-element double loop
        # (C[i, j] += -ravg[i] - cavg[j]).
        C = C - ravg.reshape(-1, 1) - cavg.reshape(1, -1)
        C = -0.5 * (C + tavg)

        # Project onto the two dominant eigenvectors of C^T C.
        w, v = numpy.linalg.eig(numpy.dot(C.T, C))
        # sorted() replaces the Python-2-only zip(...).sort() idiom.
        tmp = sorted(zip([float(val) for val in w], range(self.n)))
        w1, w2 = tmp[-1][0], tmp[-2][0]
        v1, v2 = v[:, tmp[-1][1]], v[:, tmp[-2][1]]
        x = numpy.dot(C, v1)
        y = numpy.dot(C, v2)
        return x, y
@deprecated_members(
    {"projectedDistances": "projected_distances",
     "originalDistances": "original_distances",
     "avgStress": "avg_stress",
     "progressCallback": "progress_callback",
     "getStress": "calc_stress",
     "get_stress": "calc_stress",
     "calcStress": "calc_stress",
     "getDistance": "calc_distance",
     "get_distance": "calc_distance",
     "calcDistance": "calc_distance",
     "Torgerson": "torgerson",
     "SMACOFstep": "smacof_step",
     "LSMT": "lsmt"})
class MDS(object):
    """
    Main class for performing multidimensional scaling.

    This is a thin Python wrapper around the C-level ``orangemds.MDS``
    object: attribute reads/writes for the projection state are proxied to
    the wrapped instance via ``__getattr__``/``__setattr__`` below.

    :param distances: original dissimilarity - a distance matrix to operate on.
    :type distances: :class:`Orange.misc.SymMatrix`
    :param dim: dimension of the projected space.
    :type dim: int
    :param points: an initial configuration of points (optional)
    :type points: :class:`Orange.core.FloatListList`

    An instance of MDS object has the following attributes and functions:

    .. attribute:: points

        Holds the current configuration of projected points in an
        :class:`Orange.core.FloatListList` object.

    .. attribute:: distances

        An :class:`Orange.misc.SymMatrix` containing the distances that we
        want to achieve (lsmt changes these).

    .. attribute:: projected_distances

        An :class:`Orange.misc.SymMatrix` containing the distances between
        projected points.

    .. attribute:: original_distances

        An :class:`Orange.misc.SymMatrix` containing the original distances
        between points.

    .. attribute:: stress

        An :class:`Orange.misc.SymMatrix` holding the stress.

    .. attribute:: dim

        An integer holding the dimension of the projected space.

    .. attribute:: n

        An integer holding the number of elements (points).

    .. attribute:: avg_stress

        A float holding the average stress in the :obj:`stress` matrix.

    .. attribute:: progress_callback

        A function that gets called after each optimization step in the
        :func:`run` method.
    """

    def __init__(self, distances=None, dim=2, **kwargs):
        # `self.mds` lands in __dict__ via the else-branch of __setattr__;
        # reading `self.distances` immediately afterwards is proxied to it.
        self.mds=orangemds.MDS(distances, dim, **kwargs)
        self.original_distances=Orange.misc.SymMatrix([m for m in self.distances])

    def __getattr__(self, name):
        # Proxy the projection state straight from the wrapped MDS instance;
        # anything else is a genuine missing attribute.
        if name in ["points", "projected_distances", "distances" ,"stress",
                    "progress_callback", "n", "dim", "avg_stress"]:
            return self.__dict__["mds"].__dict__[name]
        else:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        # `points` is copied element-wise so the underlying FloatListList
        # object identity (shared with the C side) is preserved.
        if name=="points":
            for i in range(len(value)):
                for j in range(len(value[i])):
                    self.mds.points[i][j]=value[i][j]
            return

        if name in ["projected_distances", "distances" ,"stress",
                    "progress_callback"]:
            self.mds.__setattr__(name, value)
        else:
            self.__dict__[name]=value

    def __nonzero__(self):
        # Python 2 truthiness hook; without it, __getattr__ proxying could
        # make `bool(mds)` misbehave.
        return True

    def smacof_step(self):
        """
        Perform a single iteration of a Smacof algorithm that optimizes
        :obj:`stress` and updates the :obj:`points`.
        """
        self.mds.SMACOFstep()

    def calc_distance(self):
        """
        Compute the distances between points and update the
        :obj:`projected_distances` matrix.
        """
        self.mds.get_distance()

    @deprecated_keywords({"stressFunc": "stress_func"})
    def calc_stress(self, stress_func=SgnRelStress):
        """
        Compute the stress between the current :obj:`projected_distances` and
        :obj:`distances` matrix using *stress_func* and update the
        :obj:`stress` matrix and :obj:`avgStress` accordingly.
        """
        self.mds.getStress(stress_func)

    @deprecated_keywords({"stressFunc": "stress_func"})
    def optimize(self, iter, stress_func=SgnRelStress, eps=1e-3,
                 progress_callback=None):
        # Forward to the C-level optimizer; the callback is installed on the
        # wrapped object so it fires after every iteration.
        self.mds.progress_callback=progress_callback
        self.mds.optimize(iter, stress_func, eps)

    @deprecated_keywords({"stressFunc": "stress_func"})
    def run(self, iter, stress_func=SgnRelStress, eps=1e-3,
            progress_callback=None):
        """
        Perform optimization until stopping conditions are met.

        Stopping conditions are:

        * optimization runs for *iter* iterations of smacof_step function, or
        * stress improvement (old stress minus new stress) is smaller than
          eps * old stress.

        :param iter: maximum number of optimization iterations.
        :type iter: int
        :param stress_func: stress function.
        """
        self.optimize(iter, stress_func, eps, progress_callback)

    def torgerson(self):
        """
        Run the Torgerson algorithm that computes an initial analytical
        solution of the problem.
        """
        # Torgerson's initial approximation
        O = numpy.array([m for m in self.distances])

        # B = double-center O**2 !!!
        # J = I - (1/n): subtracting the scalar broadcasts, giving the
        # standard centering matrix I - (1/n) * ones.
        # NOTE(review): numpy.float is an alias removed in modern NumPy;
        # builtin float should be equivalent here -- confirm before upgrading.
        J = numpy.identity(self.n) - (1/numpy.float(self.n))
        B = -0.5 * numpy.dot(numpy.dot(J, O**2), J)

        # SVD-solve B = ULU'
        (U,L,V)=svd(B)
        # X = U(L^0.5)
        # X is n-dimensional, we take the two dimensions with the largest singular values
        idx = numpy.argsort(L)[-self.dim:].tolist()
        idx.reverse()

        Lt = numpy.take(L,idx) # take those singular values
        Ut = numpy.take(U,idx,axis=1) # take those columns that are enabled
        Dt = numpy.identity(self.dim)*numpy.sqrt(Lt) # make a diagonal matrix, with squarooted values
        self.points = Orange.core.FloatListList(numpy.dot(Ut,Dt))
        self.freshD = 0

    # Kruskal's monotone transformation
    def lsmt(self):
        """
        Execute Kruskal monotone transformation.
        """
        # optimize the distance transformation
        # build vector o: (original distance, (i, j)) for every pair i > j
        effect = 0
        # NOTE(review): resolved through the deprecated_members wrapper to
        # calc_distance -- confirm the wrapper installs the old name.
        self.getDistance()
        o = []
        for i in xrange(1,self.n):
            for j in xrange(i):
                o.append((self.original_distances[i,j],(i,j)))
        o.sort(_mycompare)
        # find the ties in o, and construct the d vector sorting in order within ties
        d = []
        td = []
        uv = [] # numbers of consecutively tied o values (NOTE: never used below)
        (i,j) = o[0][1]
        # distnorm accumulates sum of squared projected distances (for the
        # normalizing multiplier f computed after the merge loop).
        distnorm = self.projected_distances[i,j]*self.projected_distances[i,j]
        td = [self.projected_distances[i,j]] # fetch distance
        for l in xrange(1,len(o)):
            # copy now sorted distances in an array
            # but sort distances within a tied o
            (i,j) = o[l][1]
            cd = self.projected_distances[i,j]
            distnorm += self.projected_distances[i,j]*self.projected_distances[i,j]
            if o[l][0] != o[l-1][0]:
                # differing value, flush the tied group as [sum, count, mean, members]
                sum = reduce(lambda x,y:x+y,td)+0.0
                d.append([sum,len(td),sum/len(td),td])
                td = []
            td.append(cd)
        sum = reduce(lambda x,y:x+y,td)+0.0
        d.append([sum,len(td),sum/len(td),td])

        # keep merging non-monotonous areas in d (pool-adjacent-violators)
        monotony = 0
        while not monotony and len(d) > 1:
            monotony = 1
            pi = 0 # index (NOTE: never used)
            n = 1 # n-areas (NOTE: never used)
            nd = []
            r = d[0] # current area
            for i in range(1,len(d)):
                tr = d[i]
                if r[2]>=tr[2]:
                    # violator: merge tr into r
                    monotony = 0
                    effect = 1
                    r[0] += tr[0]
                    r[1] += tr[1]
                    # NOTE(review): the merged mean is set from tr alone;
                    # r[0]/r[1] would be the mean of the merged block --
                    # confirm against the reference algorithm.
                    r[2] = tr[0]/tr[1]
                    r[3] += tr[3]
                else:
                    nd.append(r)
                    r = tr
            nd.append(r)
            d = nd

        # normalizing multiplier
        sum = 0.0
        for i in d:
            sum += i[2]*i[2]*i[1]
        # NOTE(review): numpy.max(sum, 1e-6) passes 1e-6 as the *axis*
        # argument, not as a floor; the builtin max(sum, 1e-6) was almost
        # certainly intended -- verify before relying on this clamp.
        f = numpy.sqrt(distnorm/numpy.max(sum,1e-6))

        # transform O: every pair in a tied block gets the block mean * f
        k = 0
        for i in d:
            for j in range(i[1]):
                (ii,jj) = o[k][1]
                self.distances[ii,jj] = f*i[2]
                k += 1
        assert(len(o) == k)
        self.freshD = 0
        return effect
| gpl-3.0 |
AMOboxTV/AMOBox.LegoBuild | script.module.unidecode/lib/unidecode/x0ce.py | 253 | 4708 | data = (
'cwik', # 0x00
'cwit', # 0x01
'cwip', # 0x02
'cwih', # 0x03
'cyu', # 0x04
'cyug', # 0x05
'cyugg', # 0x06
'cyugs', # 0x07
'cyun', # 0x08
'cyunj', # 0x09
'cyunh', # 0x0a
'cyud', # 0x0b
'cyul', # 0x0c
'cyulg', # 0x0d
'cyulm', # 0x0e
'cyulb', # 0x0f
'cyuls', # 0x10
'cyult', # 0x11
'cyulp', # 0x12
'cyulh', # 0x13
'cyum', # 0x14
'cyub', # 0x15
'cyubs', # 0x16
'cyus', # 0x17
'cyuss', # 0x18
'cyung', # 0x19
'cyuj', # 0x1a
'cyuc', # 0x1b
'cyuk', # 0x1c
'cyut', # 0x1d
'cyup', # 0x1e
'cyuh', # 0x1f
'ceu', # 0x20
'ceug', # 0x21
'ceugg', # 0x22
'ceugs', # 0x23
'ceun', # 0x24
'ceunj', # 0x25
'ceunh', # 0x26
'ceud', # 0x27
'ceul', # 0x28
'ceulg', # 0x29
'ceulm', # 0x2a
'ceulb', # 0x2b
'ceuls', # 0x2c
'ceult', # 0x2d
'ceulp', # 0x2e
'ceulh', # 0x2f
'ceum', # 0x30
'ceub', # 0x31
'ceubs', # 0x32
'ceus', # 0x33
'ceuss', # 0x34
'ceung', # 0x35
'ceuj', # 0x36
'ceuc', # 0x37
'ceuk', # 0x38
'ceut', # 0x39
'ceup', # 0x3a
'ceuh', # 0x3b
'cyi', # 0x3c
'cyig', # 0x3d
'cyigg', # 0x3e
'cyigs', # 0x3f
'cyin', # 0x40
'cyinj', # 0x41
'cyinh', # 0x42
'cyid', # 0x43
'cyil', # 0x44
'cyilg', # 0x45
'cyilm', # 0x46
'cyilb', # 0x47
'cyils', # 0x48
'cyilt', # 0x49
'cyilp', # 0x4a
'cyilh', # 0x4b
'cyim', # 0x4c
'cyib', # 0x4d
'cyibs', # 0x4e
'cyis', # 0x4f
'cyiss', # 0x50
'cying', # 0x51
'cyij', # 0x52
'cyic', # 0x53
'cyik', # 0x54
'cyit', # 0x55
'cyip', # 0x56
'cyih', # 0x57
'ci', # 0x58
'cig', # 0x59
'cigg', # 0x5a
'cigs', # 0x5b
'cin', # 0x5c
'cinj', # 0x5d
'cinh', # 0x5e
'cid', # 0x5f
'cil', # 0x60
'cilg', # 0x61
'cilm', # 0x62
'cilb', # 0x63
'cils', # 0x64
'cilt', # 0x65
'cilp', # 0x66
'cilh', # 0x67
'cim', # 0x68
'cib', # 0x69
'cibs', # 0x6a
'cis', # 0x6b
'ciss', # 0x6c
'cing', # 0x6d
'cij', # 0x6e
'cic', # 0x6f
'cik', # 0x70
'cit', # 0x71
'cip', # 0x72
'cih', # 0x73
'ka', # 0x74
'kag', # 0x75
'kagg', # 0x76
'kags', # 0x77
'kan', # 0x78
'kanj', # 0x79
'kanh', # 0x7a
'kad', # 0x7b
'kal', # 0x7c
'kalg', # 0x7d
'kalm', # 0x7e
'kalb', # 0x7f
'kals', # 0x80
'kalt', # 0x81
'kalp', # 0x82
'kalh', # 0x83
'kam', # 0x84
'kab', # 0x85
'kabs', # 0x86
'kas', # 0x87
'kass', # 0x88
'kang', # 0x89
'kaj', # 0x8a
'kac', # 0x8b
'kak', # 0x8c
'kat', # 0x8d
'kap', # 0x8e
'kah', # 0x8f
'kae', # 0x90
'kaeg', # 0x91
'kaegg', # 0x92
'kaegs', # 0x93
'kaen', # 0x94
'kaenj', # 0x95
'kaenh', # 0x96
'kaed', # 0x97
'kael', # 0x98
'kaelg', # 0x99
'kaelm', # 0x9a
'kaelb', # 0x9b
'kaels', # 0x9c
'kaelt', # 0x9d
'kaelp', # 0x9e
'kaelh', # 0x9f
'kaem', # 0xa0
'kaeb', # 0xa1
'kaebs', # 0xa2
'kaes', # 0xa3
'kaess', # 0xa4
'kaeng', # 0xa5
'kaej', # 0xa6
'kaec', # 0xa7
'kaek', # 0xa8
'kaet', # 0xa9
'kaep', # 0xaa
'kaeh', # 0xab
'kya', # 0xac
'kyag', # 0xad
'kyagg', # 0xae
'kyags', # 0xaf
'kyan', # 0xb0
'kyanj', # 0xb1
'kyanh', # 0xb2
'kyad', # 0xb3
'kyal', # 0xb4
'kyalg', # 0xb5
'kyalm', # 0xb6
'kyalb', # 0xb7
'kyals', # 0xb8
'kyalt', # 0xb9
'kyalp', # 0xba
'kyalh', # 0xbb
'kyam', # 0xbc
'kyab', # 0xbd
'kyabs', # 0xbe
'kyas', # 0xbf
'kyass', # 0xc0
'kyang', # 0xc1
'kyaj', # 0xc2
'kyac', # 0xc3
'kyak', # 0xc4
'kyat', # 0xc5
'kyap', # 0xc6
'kyah', # 0xc7
'kyae', # 0xc8
'kyaeg', # 0xc9
'kyaegg', # 0xca
'kyaegs', # 0xcb
'kyaen', # 0xcc
'kyaenj', # 0xcd
'kyaenh', # 0xce
'kyaed', # 0xcf
'kyael', # 0xd0
'kyaelg', # 0xd1
'kyaelm', # 0xd2
'kyaelb', # 0xd3
'kyaels', # 0xd4
'kyaelt', # 0xd5
'kyaelp', # 0xd6
'kyaelh', # 0xd7
'kyaem', # 0xd8
'kyaeb', # 0xd9
'kyaebs', # 0xda
'kyaes', # 0xdb
'kyaess', # 0xdc
'kyaeng', # 0xdd
'kyaej', # 0xde
'kyaec', # 0xdf
'kyaek', # 0xe0
'kyaet', # 0xe1
'kyaep', # 0xe2
'kyaeh', # 0xe3
'keo', # 0xe4
'keog', # 0xe5
'keogg', # 0xe6
'keogs', # 0xe7
'keon', # 0xe8
'keonj', # 0xe9
'keonh', # 0xea
'keod', # 0xeb
'keol', # 0xec
'keolg', # 0xed
'keolm', # 0xee
'keolb', # 0xef
'keols', # 0xf0
'keolt', # 0xf1
'keolp', # 0xf2
'keolh', # 0xf3
'keom', # 0xf4
'keob', # 0xf5
'keobs', # 0xf6
'keos', # 0xf7
'keoss', # 0xf8
'keong', # 0xf9
'keoj', # 0xfa
'keoc', # 0xfb
'keok', # 0xfc
'keot', # 0xfd
'keop', # 0xfe
'keoh', # 0xff
)
| gpl-2.0 |
acrsilva/animated-zZz-machine | bundle_final_app/libs/pyqtgraph-develop/pyqtgraph/graphicsItems/ViewBox/axisCtrlTemplate_pyqt.py | 38 | 6134 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './pyqtgraph/graphicsItems/ViewBox/axisCtrlTemplate.ui'
#
# Created: Mon Dec 23 10:10:51 2013
# by: PyQt4 UI code generator 4.10
#
# WARNING! All changes made in this file will be lost!
from ...Qt import QtCore, QtGui
# pyuic4 compatibility shims: probe for QString/UnicodeUTF8 and fall back to
# plain functions when the attributes are absent (API-v2 / newer PyQt).
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        # No QString available: strings are already unicode, pass through.
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        # Translate with an explicit encoding argument (older PyQt API).
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        # Encoding-less translate signature.
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
    """Auto-generated form class (pyuic4) for the ViewBox axis-control panel.

    Do not edit by hand -- regenerate from ``axisCtrlTemplate.ui``; per the
    header warning, manual changes to this file will be lost.
    """

    def setupUi(self, Form):
        # Build the widget tree and grid layout; object names mirror the .ui
        # file so connectSlotsByName() can auto-wire slots.
        Form.setObjectName(_fromUtf8("Form"))
        Form.resize(186, 154)
        Form.setMaximumSize(QtCore.QSize(200, 16777215))
        self.gridLayout = QtGui.QGridLayout(Form)
        self.gridLayout.setMargin(0)
        self.gridLayout.setSpacing(0)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.label = QtGui.QLabel(Form)
        self.label.setObjectName(_fromUtf8("label"))
        self.gridLayout.addWidget(self.label, 7, 0, 1, 2)
        self.linkCombo = QtGui.QComboBox(Form)
        self.linkCombo.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContents)
        self.linkCombo.setObjectName(_fromUtf8("linkCombo"))
        self.gridLayout.addWidget(self.linkCombo, 7, 2, 1, 2)
        self.autoPercentSpin = QtGui.QSpinBox(Form)
        self.autoPercentSpin.setEnabled(True)
        self.autoPercentSpin.setMinimum(1)
        self.autoPercentSpin.setMaximum(100)
        self.autoPercentSpin.setSingleStep(1)
        self.autoPercentSpin.setProperty("value", 100)
        self.autoPercentSpin.setObjectName(_fromUtf8("autoPercentSpin"))
        self.gridLayout.addWidget(self.autoPercentSpin, 2, 2, 1, 2)
        self.autoRadio = QtGui.QRadioButton(Form)
        self.autoRadio.setChecked(True)
        self.autoRadio.setObjectName(_fromUtf8("autoRadio"))
        self.gridLayout.addWidget(self.autoRadio, 2, 0, 1, 2)
        self.manualRadio = QtGui.QRadioButton(Form)
        self.manualRadio.setObjectName(_fromUtf8("manualRadio"))
        self.gridLayout.addWidget(self.manualRadio, 1, 0, 1, 2)
        self.minText = QtGui.QLineEdit(Form)
        self.minText.setObjectName(_fromUtf8("minText"))
        self.gridLayout.addWidget(self.minText, 1, 2, 1, 1)
        self.maxText = QtGui.QLineEdit(Form)
        self.maxText.setObjectName(_fromUtf8("maxText"))
        self.gridLayout.addWidget(self.maxText, 1, 3, 1, 1)
        self.invertCheck = QtGui.QCheckBox(Form)
        self.invertCheck.setObjectName(_fromUtf8("invertCheck"))
        self.gridLayout.addWidget(self.invertCheck, 5, 0, 1, 4)
        self.mouseCheck = QtGui.QCheckBox(Form)
        self.mouseCheck.setChecked(True)
        self.mouseCheck.setObjectName(_fromUtf8("mouseCheck"))
        self.gridLayout.addWidget(self.mouseCheck, 6, 0, 1, 4)
        self.visibleOnlyCheck = QtGui.QCheckBox(Form)
        self.visibleOnlyCheck.setObjectName(_fromUtf8("visibleOnlyCheck"))
        self.gridLayout.addWidget(self.visibleOnlyCheck, 3, 2, 1, 2)
        self.autoPanCheck = QtGui.QCheckBox(Form)
        self.autoPanCheck.setObjectName(_fromUtf8("autoPanCheck"))
        self.gridLayout.addWidget(self.autoPanCheck, 4, 2, 1, 2)

        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        # Install all user-visible strings and tooltips (translated at runtime).
        Form.setWindowTitle(_translate("Form", "Form", None))
        self.label.setText(_translate("Form", "Link Axis:", None))
        self.linkCombo.setToolTip(_translate("Form", "<html><head/><body><p>Links this axis with another view. When linked, both views will display the same data range.</p></body></html>", None))
        self.autoPercentSpin.setToolTip(_translate("Form", "<html><head/><body><p>Percent of data to be visible when auto-scaling. It may be useful to decrease this value for data with spiky noise.</p></body></html>", None))
        self.autoPercentSpin.setSuffix(_translate("Form", "%", None))
        self.autoRadio.setToolTip(_translate("Form", "<html><head/><body><p>Automatically resize this axis whenever the displayed data is changed.</p></body></html>", None))
        self.autoRadio.setText(_translate("Form", "Auto", None))
        self.manualRadio.setToolTip(_translate("Form", "<html><head/><body><p>Set the range for this axis manually. This disables automatic scaling. </p></body></html>", None))
        self.manualRadio.setText(_translate("Form", "Manual", None))
        self.minText.setToolTip(_translate("Form", "<html><head/><body><p>Minimum value to display for this axis.</p></body></html>", None))
        self.minText.setText(_translate("Form", "0", None))
        self.maxText.setToolTip(_translate("Form", "<html><head/><body><p>Maximum value to display for this axis.</p></body></html>", None))
        self.maxText.setText(_translate("Form", "0", None))
        self.invertCheck.setToolTip(_translate("Form", "<html><head/><body><p>Inverts the display of this axis. (+y points downward instead of upward)</p></body></html>", None))
        self.invertCheck.setText(_translate("Form", "Invert Axis", None))
        self.mouseCheck.setToolTip(_translate("Form", "<html><head/><body><p>Enables mouse interaction (panning, scaling) for this axis.</p></body></html>", None))
        self.mouseCheck.setText(_translate("Form", "Mouse Enabled", None))
        self.visibleOnlyCheck.setToolTip(_translate("Form", "<html><head/><body><p>When checked, the axis will only auto-scale to data that is visible along the orthogonal axis.</p></body></html>", None))
        self.visibleOnlyCheck.setText(_translate("Form", "Visible Data Only", None))
        self.autoPanCheck.setToolTip(_translate("Form", "<html><head/><body><p>When checked, the axis will automatically pan to center on the current data, but the scale along this axis will not change.</p></body></html>", None))
        self.autoPanCheck.setText(_translate("Form", "Auto Pan Only", None))
| lgpl-3.0 |
grantsewell/nzbToMedia | libs/requests/packages/urllib3/contrib/pyopenssl.py | 215 | 10101 | '''SSL with SNI_-support for Python 2. Follow these instructions if you would
like to verify SSL certificates in Python 2. Note, the default libraries do
*not* do certificate checking; you need to do additional work to validate
certificates yourself.
This needs the following packages installed:
* pyOpenSSL (tested with 0.13)
* ndg-httpsclient (tested with 0.3.2)
* pyasn1 (tested with 0.1.6)
You can install them with the following command:
pip install pyopenssl ndg-httpsclient pyasn1
To activate certificate checking, call
:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
before you begin making HTTP requests. This can be done in a ``sitecustomize``
module, or at any other time before your application begins using ``urllib3``,
like this::
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
Now you can use :mod:`urllib3` as you normally would, and it will support SNI
when the required modules are installed.
Activating this module also has the positive side effect of disabling SSL/TLS
compression in Python 2 (see `CRIME attack`_).
If you want to configure the default list of supported cipher suites, you can
set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
Module Variables
----------------
:var DEFAULT_SSL_CIPHER_LIST: The list of supported SSL/TLS cipher suites.
Default: ``ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:
ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS``
.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
'''
try:
from ndg.httpsclient.ssl_peer_verification import SUBJ_ALT_NAME_SUPPORT
from ndg.httpsclient.subj_alt_name import SubjectAltName as BaseSubjectAltName
except SyntaxError as e:
raise ImportError(e)
import OpenSSL.SSL
from pyasn1.codec.der import decoder as der_decoder
from pyasn1.type import univ, constraint
from socket import _fileobject, timeout
import ssl
import select
from .. import connection
from .. import util
__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
# SNI only *really* works if we can read the subjectAltName of certificates.
HAS_SNI = SUBJ_ALT_NAME_SUPPORT
# Map from urllib3 to PyOpenSSL compatible parameter-values.
_openssl_versions = {
ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
}
try:
_openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD})
except AttributeError:
pass
_openssl_verify = {
ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER
+ OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}
# A secure default.
# Sources for more information on TLS ciphers:
#
# - https://wiki.mozilla.org/Security/Server_Side_TLS
# - https://www.ssllabs.com/projects/best-practices/index.html
# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
#
# The general intent is:
# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
# - prefer ECDHE over DHE for better performance,
# - prefer any AES-GCM over any AES-CBC for better performance and security,
# - use 3DES as fallback which is secure but slow,
# - disable NULL authentication, MD5 MACs and DSS for security reasons.
DEFAULT_SSL_CIPHER_LIST = "ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:" + \
"ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:" + \
"!aNULL:!MD5:!DSS"
orig_util_HAS_SNI = util.HAS_SNI
orig_connection_ssl_wrap_socket = connection.ssl_wrap_socket
def inject_into_urllib3():
    """Monkey-patch urllib3 so that it performs SSL via PyOpenSSL.

    Swaps in the PyOpenSSL-backed ``ssl_wrap_socket`` and advertises SNI
    support through ``util.HAS_SNI``. Reversible via
    :func:`extract_from_urllib3`.
    """
    util.HAS_SNI = HAS_SNI
    connection.ssl_wrap_socket = ssl_wrap_socket
def extract_from_urllib3():
    """Restore urllib3's original SSL machinery.

    Undoes :func:`inject_into_urllib3` by putting back the saved
    ``ssl_wrap_socket`` implementation and the original ``HAS_SNI`` flag.
    """
    util.HAS_SNI = orig_util_HAS_SNI
    connection.ssl_wrap_socket = orig_connection_ssl_wrap_socket
### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
class SubjectAltName(BaseSubjectAltName):
    '''ASN.1 implementation for subjectAltNames support'''

    # There is no limit to how many SAN certificates a certificate may have,
    # however this needs to have some limit so we'll set an arbitrarily high
    # limit (the ndg-httpsclient base class is stricter, hence the override).
    sizeSpec = univ.SequenceOf.sizeSpec + \
               constraint.ValueSizeConstraint(1, 1024)
### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
def get_subj_alt_name(peer_cert):
    """Return the list of dNSName entries in *peer_cert*'s subjectAltName.

    Returns an empty list when SAN parsing support is unavailable or the
    certificate carries no subjectAltName extension.
    """
    # Search through extensions
    dns_name = []
    if not SUBJ_ALT_NAME_SUPPORT:
        return dns_name

    general_names = SubjectAltName()
    for i in range(peer_cert.get_extension_count()):
        ext = peer_cert.get_extension(i)
        ext_name = ext.get_short_name()
        # NOTE(review): on Python 3, get_short_name() returns bytes, so this
        # str comparison would never match; this module targets Python 2
        # (see the module docstring) -- confirm before porting.
        if ext_name != 'subjectAltName':
            continue

        # PyOpenSSL returns extension data in ASN.1 encoded form
        ext_dat = ext.get_data()
        decoded_dat = der_decoder.decode(ext_dat,
                                         asn1Spec=general_names)

        # der_decoder.decode returns (value, remaining_substrate); iterating
        # the tuple and isinstance-filtering skips the non-SAN remainder.
        for name in decoded_dat:
            if not isinstance(name, SubjectAltName):
                continue
            for entry in range(len(name)):
                component = name.getComponentByPosition(entry)
                if component.getName() != 'dNSName':
                    continue
                dns_name.append(str(component.getComponent()))

    return dns_name
class WrappedSocket(object):
    '''API-compatibility wrapper for Python OpenSSL's Connection-class.

    Presents the subset of the stdlib ``ssl.SSLSocket`` interface that
    urllib3 uses, on top of an ``OpenSSL.SSL.Connection``.

    Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
    collector of pypy.
    '''

    def __init__(self, connection, socket, suppress_ragged_eofs=True):
        self.connection = connection
        self.socket = socket
        self.suppress_ragged_eofs = suppress_ragged_eofs
        # Count of live file-like objects handed out by makefile(); close()
        # only shuts the TLS connection down once this drops below one.
        self._makefile_refs = 0

    def fileno(self):
        return self.socket.fileno()

    def makefile(self, mode, bufsize=-1):
        self._makefile_refs += 1
        return _fileobject(self, mode, bufsize, close=True)

    def recv(self, *args, **kwargs):
        try:
            data = self.connection.recv(*args, **kwargs)
        except OpenSSL.SSL.SysCallError as e:
            # Ragged EOF: peer closed without a TLS close_notify. Treat as a
            # normal end-of-stream unless the caller opted out.
            if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
                return b''
            else:
                raise
        except OpenSSL.SSL.ZeroReturnError as e:
            # Clean TLS shutdown from the peer -> end-of-stream.
            if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
                return b''
            else:
                raise
        except OpenSSL.SSL.WantReadError:
            # OpenSSL needs more raw bytes: wait for the socket to become
            # readable (honouring its timeout), then retry the read.
            rd, wd, ed = select.select(
                [self.socket], [], [], self.socket.gettimeout())
            if not rd:
                raise timeout('The read operation timed out')
            else:
                return self.recv(*args, **kwargs)
        else:
            return data

    def settimeout(self, timeout):
        return self.socket.settimeout(timeout)

    def _send_until_done(self, data):
        # Retry WantWriteError until OpenSSL accepts at least part of `data`;
        # returns the number of bytes actually consumed.
        while True:
            try:
                return self.connection.send(data)
            except OpenSSL.SSL.WantWriteError:
                _, wlist, _ = select.select([], [self.socket], [],
                                            self.socket.gettimeout())
                if not wlist:
                    raise timeout()
                continue

    def sendall(self, data):
        # send() may accept only a prefix; loop until everything is written.
        while len(data):
            sent = self._send_until_done(data)
            data = data[sent:]

    def close(self):
        if self._makefile_refs < 1:
            return self.connection.shutdown()
        else:
            self._makefile_refs -= 1

    def getpeercert(self, binary_form=False):
        x509 = self.connection.get_peer_certificate()

        if not x509:
            return x509

        if binary_form:
            return OpenSSL.crypto.dump_certificate(
                OpenSSL.crypto.FILETYPE_ASN1,
                x509)

        # Mimic the stdlib ssl.getpeercert() dict shape (subject + SANs only).
        return {
            'subject': (
                (('commonName', x509.get_subject().CN),),
            ),
            'subjectAltName': [
                ('DNS', value)
                for value in get_subj_alt_name(x509)
            ]
        }

    def _reuse(self):
        self._makefile_refs += 1

    def _drop(self):
        if self._makefile_refs < 1:
            self.close()
        else:
            self._makefile_refs -= 1
def _verify_callback(cnx, x509, err_no, err_depth, return_code):
return err_no == 0
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
                    ca_certs=None, server_hostname=None,
                    ssl_version=None):
    """PyOpenSSL-backed replacement for urllib3's ``ssl_wrap_socket``.

    Builds an OpenSSL context (client cert/key, verification mode, CA
    bundle), performs the TLS handshake with SNI, and returns a
    :class:`WrappedSocket` compatible with ``ssl.SSLSocket`` usage.
    """
    ctx = OpenSSL.SSL.Context(_openssl_versions[ssl_version])
    if certfile:
        keyfile = keyfile or certfile  # Match behaviour of the normal python ssl library
        ctx.use_certificate_file(certfile)
    if keyfile:
        ctx.use_privatekey_file(keyfile)
    if cert_reqs != ssl.CERT_NONE:
        ctx.set_verify(_openssl_verify[cert_reqs], _verify_callback)
    if ca_certs:
        try:
            ctx.load_verify_locations(ca_certs, None)
        except OpenSSL.SSL.Error as e:
            raise ssl.SSLError('bad ca_certs: %r' % ca_certs, e)
    else:
        ctx.set_default_verify_paths()

    # Disable TLS compression to mitigate CRIME attack (issue #309)
    OP_NO_COMPRESSION = 0x20000
    ctx.set_options(OP_NO_COMPRESSION)

    # Set list of supported ciphersuites.
    ctx.set_cipher_list(DEFAULT_SSL_CIPHER_LIST)

    cnx = OpenSSL.SSL.Connection(ctx, sock)
    # NOTE(review): set_tlsext_host_name expects a byte string; callers here
    # pass Python 2 str, which is bytes -- confirm if ever ported to Python 3.
    cnx.set_tlsext_host_name(server_hostname)
    cnx.set_connect_state()
    # Drive the handshake by hand, blocking on the raw socket whenever
    # OpenSSL signals it needs more data.
    while True:
        try:
            cnx.do_handshake()
        except OpenSSL.SSL.WantReadError:
            select.select([sock], [], [])
            continue
        except OpenSSL.SSL.Error as e:
            raise ssl.SSLError('bad handshake', e)
        break

    return WrappedSocket(cnx, sock)
| gpl-3.0 |
kkopachev/thumbor | integration_tests/__init__.py | 4 | 1410 | import os.path
from shutil import which
from tornado.testing import AsyncHTTPTestCase
from thumbor.app import ThumborServiceApp
from thumbor.config import Config
from thumbor.context import Context, ServerParameters
from thumbor.importer import Importer
class EngineCase(AsyncHTTPTestCase):
    """Integration-test base that boots a Thumbor application for one engine.

    Subclasses set an ``engine`` attribute naming the imaging engine module
    under test; when it is absent, :meth:`get_app` yields ``None``.
    """

    def get_app(self):
        """Build and return a ThumborServiceApp configured for the engine."""
        params = ServerParameters(None, None, None, None, None, None)
        params.gifsicle_path = which("gifsicle")

        config = Config(SECURITY_KEY="ACME-SEC")
        config.DETECTORS = [
            "thumbor.detectors.face_detector",
            "thumbor.detectors.profile_detector",
            "thumbor.detectors.glasses_detector",
            "thumbor.detectors.feature_detector",
        ]
        config.STORAGE = "thumbor.storages.no_storage"
        config.LOADER = "thumbor.loaders.file_loader"
        config.FILE_LOADER_ROOT_PATH = os.path.join(
            os.path.dirname(__file__), "imgs"
        )
        config.ENGINE = getattr(self, "engine", None)
        config.USE_GIFSICLE_ENGINE = True
        config.FFMPEG_PATH = which("ffmpeg")
        config.ENGINE_THREADPOOL_SIZE = 10
        config.OPTIMIZERS = ["thumbor.optimizers.gifv"]

        # No engine configured on the subclass -> nothing to serve.
        if not config.ENGINE:
            return None

        importer = Importer(config)
        importer.import_modules()
        context = Context(params, config, importer)
        return ThumborServiceApp(context)
| mit |
haocs/autorest | src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/BodyFile/autorestswaggerbatfileservice/models/error.py | 211 | 1286 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class Error(Model):
    """Error payload returned by the service (AutoRest-generated model).

    :param status: status code reported in the response body.
    :type status: int
    :param message: human-readable description of the failure.
    :type message: str
    """

    # msrest wire-format map: attribute name -> JSON key and type.
    _attribute_map = {
        'status': {'key': 'status', 'type': 'int'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self, status=None, message=None):
        self.status = status
        self.message = message
class ErrorException(HttpOperationError):
    """Raised when the server responded with an exception of type 'Error'.

    :param deserialize: A deserializer able to decode the 'Error' model.
    :param response: Server response to be deserialized.
    """

    def __init__(self, deserialize, response, *args):
        # 'Error' tells the base class which model to deserialize the body as.
        super(ErrorException, self).__init__(deserialize, response, 'Error', *args)
| mit |
Fastcode/NUClearExample | nuclear/b.py | 1 | 3760 | #!/usr/bin/env python3
import sys
import os
import argparse
import pkgutil
import re
# Don't make .pyc files
sys.dont_write_bytecode = True
# Go and get all the relevant directories and variables from cmake
nuclear_dir = os.path.dirname(os.path.realpath(__file__))
project_dir = os.path.dirname(nuclear_dir)
# Get the tools directories to find b modules
nuclear_tools_path = os.path.join(nuclear_dir, 'tools')
user_tools_path = os.path.join(project_dir, 'tools')
# Build our cmake cache
cmake_cache = {}

# A CMakeCache.txt in the current working directory takes priority.
if os.path.isfile('CMakeCache.txt'):
    with open('CMakeCache.txt', 'r') as f:
        cmake_cache_text = f.readlines()

# Otherwise look for one in build/ or any of its first-level subdirectories.
else:
    dirs = ['build']
    try:
        # NOTE(review): listdir uses the cwd-relative 'build' while the
        # isfile check below joins against project_dir -- confirm they agree.
        dirs.extend([os.path.join('build', f) for f in os.listdir('build')])
    except FileNotFoundError:
        pass

    for d in dirs:
        cache_path = os.path.join(project_dir, d, 'CMakeCache.txt')
        if os.path.isfile(cache_path):
            with open(cache_path, 'r') as f:
                cmake_cache_text = f.readlines()
            break

# If we still didn't find anything, continue with an empty cache.
try:
    cmake_cache_text
except NameError:
    cmake_cache_text = []

# Parse "NAME:TYPE=VALUE" entries, skipping blanks and //- or #-comments.
_cache_entry = re.compile(r'([a-zA-Z_$][a-zA-Z_.$0-9-]*):(\w+)=(.*)')
for l in cmake_cache_text:
    # Remove whitespace at the ends and start
    l = l.strip()

    # Remove lines that are comments
    if len(l) > 0 and not l.startswith('//') and not l.startswith('#'):
        m = _cache_entry.match(l)
        # Fix: the original called .groups() directly on the match result and
        # crashed with AttributeError on any line not shaped NAME:TYPE=VALUE;
        # such lines are now skipped.
        if m is None:
            continue
        g = m.groups()

        # Store our value and split it into a list if it is a list
        cmake_cache[g[0]] = g[2] if ';' not in g[2].strip(';') else g[2].strip(';').split(';')
# Try to find our source and binary directories from the parsed cache.
try:
    binary_dir = cmake_cache[cmake_cache["CMAKE_PROJECT_NAME"] + '_BINARY_DIR']
except KeyError:
    binary_dir = None

try:
    source_dir = cmake_cache[cmake_cache["CMAKE_PROJECT_NAME"] + '_SOURCE_DIR']
except KeyError:
    # Fix: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to KeyError to match the lookup above.
    source_dir = project_dir
if __name__ == "__main__":
    # Print build information when a CMake cache was found.
    if (binary_dir is not None):
        # Print some information for the user
        print("b script for", cmake_cache["CMAKE_PROJECT_NAME"])
        print("\tSource:", cmake_cache[cmake_cache["CMAKE_PROJECT_NAME"] + '_SOURCE_DIR'])
        print("\tBinary:", cmake_cache[cmake_cache["CMAKE_PROJECT_NAME"] + '_BINARY_DIR'])
        print()

    # Add our builtin tools to the path and user tools
    sys.path.append(nuclear_tools_path)
    sys.path.append(user_tools_path)

    # Root parser information
    command = argparse.ArgumentParser(description='This script is an optional helper script for performing common tasks for working with the NUClear roles system.')
    subcommands = command.add_subparsers(dest='command')
    subcommands.help = "The command to run from the script. See each help for more information."

    # Get all of the packages that are in the build tools
    modules = pkgutil.iter_modules(path=[nuclear_tools_path, user_tools_path])

    # Our tools dictionary: maps subcommand name -> its run() callable.
    tools = {}

    # Loop through all the modules we have to set them up in the parser
    for loader, module_name, ispkg in modules:
        # Get our module, class name and registration function.
        # NOTE(review): loader.find_module(...).load_module(...) is deprecated
        # in later Python 3 releases (removed in 3.12) -- confirm the
        # supported interpreter range before upgrading.
        module = loader.find_module(module_name).load_module(module_name)
        tool = getattr(module, 'run')
        register = getattr(module, 'register')

        # Let the tool register it's arguments
        register(subcommands.add_parser(module_name))

        # Associate our module_name with this tool
        tools[module_name] = tool

    # Parse our arguments
    args = command.parse_args()

    # Pass to our tool
    tools[args.command](**vars(args))
| mit |
darkleons/BE | addons/sale_journal/__init__.py | 443 | 1067 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sale_journal
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
drimer/NetControl | webconf/settings.py | 1 | 2783 | """
Django settings for NetControl project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os

# Project root: two directory levels up from this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'sfc3xmpqskk2&^)kiybw41es_w+2m_z8suv57&6j6+fzl1o@r^'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty is fine while DEBUG is True; must list served hostnames otherwise.
ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'webapp',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)

ROOT_URLCONF = 'webconf.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                # Project-specific processor that exposes JS template data.
                'webapp.template_context.javascript.templates',
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'webconf.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/

STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'client'),
)
| gpl-2.0 |
yrobla/nova | nova/tests/test_libvirt.py | 1 | 220373 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack Foundation
# Copyright 2012 University Of Minho
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import errno
import eventlet
import fixtures
import json
import mox
import os
import re
import shutil
import tempfile
from lxml import etree
from oslo.config import cfg
from xml.dom import minidom
from nova.api.ec2 import cloud
from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova.openstack.common import fileutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
from nova import test
from nova.tests import fake_libvirt_utils
from nova.tests import fake_network
import nova.tests.image.fake
from nova.tests import matchers
from nova import utils
from nova import version
from nova.virt.disk import api as disk
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import fake
from nova.virt import firewall as base_firewall
from nova.virt import images
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import driver as libvirt_driver
from nova.virt.libvirt import firewall
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt import netutils
try:
import libvirt
except ImportError:
import nova.tests.fakelibvirt as libvirt
libvirt_driver.libvirt = libvirt
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('base_dir_name', 'nova.virt.libvirt.imagecache')
LOG = logging.getLogger(__name__)
_fake_network_info = fake_network.fake_get_instance_nw_info
_fake_stub_out_get_nw_info = fake_network.stub_out_nw_api_get_instance_nw_info
_ipv4_like = fake_network.ipv4_like
def _concurrency(signal, wait, done, target):
    """Synchronization callback run inside a greenthread by the cache tests.

    Signals the caller that it has started, blocks until the caller
    releases it, then signals completion.  ``target`` exists only to
    match the image-cache callback signature; it is not used.
    """
    # NOTE: the send -> wait -> send ordering is exactly what the
    # concurrency tests synchronize on; do not reorder these calls.
    signal.send()
    wait.wait()
    done.send()
class FakeVirDomainSnapshot(object):
    """Minimal stand-in for a libvirt domain-snapshot handle."""

    def __init__(self, dom=None):
        # Remember which (possibly fake) domain this snapshot belongs to.
        self.dom = dom

    def delete(self, flags):
        # Deleting a fake snapshot has nothing to clean up.
        pass
class FakeVirtDomain(object):
    """In-memory stand-in for a libvirt virDomain handle.

    Serves back either caller-supplied domain XML or a canned KVM
    definition, and stubs out the lifecycle calls the driver makes.
    """

    def __init__(self, fake_xml=None, uuidstr=None):
        self.uuidstr = uuidstr
        # Fall back to a canned single-disk KVM domain when no XML given.
        if not fake_xml:
            fake_xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                    </devices>
                </domain>
            """
        self._fake_dom_xml = fake_xml

    def name(self):
        # Real libvirt returns the domain name; the fake embeds its repr.
        return "fake-domain %s" % self

    def info(self):
        # Mirrors virDomain.info(): [state, maxMem, memory, nrVirtCpu,
        # cpuTime]; only the state slot carries a meaningful value here.
        return [power_state.RUNNING, None, None, None, None]

    def create(self):
        pass

    def managedSave(self, *args):
        pass

    def createWithFlags(self, launch_flags):
        pass

    def XMLDesc(self, *args):
        # Flags are accepted and ignored; always the same XML.
        return self._fake_dom_xml

    def UUIDString(self):
        return self.uuidstr
class CacheConcurrencyTestCase(test.TestCase):
    """Verify imagebackend cache locking with greenthreads.

    Two fills of the *same* cache filename must serialize; fills of
    *different* filenames must be allowed to run concurrently.
    """

    def setUp(self):
        super(CacheConcurrencyTestCase, self).setUp()

        self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)

        # utils.synchronized() will create the lock_path for us if it
        # doesn't already exist. It will also delete it when it's done,
        # which can cause race conditions with the multiple threads we
        # use for tests. So, create the path here so utils.synchronized()
        # won't delete it out from under one of the threads.
        self.lock_path = os.path.join(CONF.instances_path, 'locks')
        fileutils.ensure_tree(self.lock_path)

        def fake_exists(fname):
            # Pretend only the cache base dir and the lock dir exist so
            # the backend always takes the fetch-into-cache path.
            basedir = os.path.join(CONF.instances_path, CONF.base_dir_name)
            if fname == basedir or fname == self.lock_path:
                return True
            return False

        def fake_execute(*args, **kwargs):
            pass

        def fake_extend(image, size):
            pass

        self.stubs.Set(os.path, 'exists', fake_exists)
        self.stubs.Set(utils, 'execute', fake_execute)
        self.stubs.Set(imagebackend.disk, 'extend', fake_extend)
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))

    def test_same_fname_concurrency(self):
        # Ensures that the same fname cache runs sequentially.
        uuid = uuidutils.generate_uuid()

        backend = imagebackend.Backend(False)
        wait1 = eventlet.event.Event()
        done1 = eventlet.event.Event()
        sig1 = eventlet.event.Event()
        thr1 = eventlet.spawn(backend.image({'name': 'instance',
                                             'uuid': uuid},
                                            'name').cache,
                              _concurrency, 'fname', None,
                              signal=sig1, wait=wait1, done=done1)
        eventlet.sleep(0)
        # Thread 1 should run before thread 2.
        sig1.wait()

        wait2 = eventlet.event.Event()
        done2 = eventlet.event.Event()
        sig2 = eventlet.event.Event()
        thr2 = eventlet.spawn(backend.image({'name': 'instance',
                                             'uuid': uuid},
                                            'name').cache,
                              _concurrency, 'fname', None,
                              signal=sig2, wait=wait2, done=done2)

        wait2.send()
        eventlet.sleep(0)
        try:
            # Thread 2 must still be blocked on the per-fname lock.
            self.assertFalse(done2.ready())
        finally:
            wait1.send()
            done1.wait()
            eventlet.sleep(0)
        self.assertTrue(done2.ready())

        # Wait on greenthreads to assert they didn't raise exceptions
        # during execution
        thr1.wait()
        thr2.wait()

    def test_different_fname_concurrency(self):
        # Ensures that two different fname caches are concurrent.
        uuid = uuidutils.generate_uuid()

        backend = imagebackend.Backend(False)
        wait1 = eventlet.event.Event()
        done1 = eventlet.event.Event()
        sig1 = eventlet.event.Event()
        thr1 = eventlet.spawn(backend.image({'name': 'instance',
                                             'uuid': uuid},
                                            'name').cache,
                              _concurrency, 'fname2', None,
                              signal=sig1, wait=wait1, done=done1)
        eventlet.sleep(0)
        # Thread 1 should run before thread 2.
        sig1.wait()

        wait2 = eventlet.event.Event()
        done2 = eventlet.event.Event()
        sig2 = eventlet.event.Event()
        thr2 = eventlet.spawn(backend.image({'name': 'instance',
                                             'uuid': uuid},
                                            'name').cache,
                              _concurrency, 'fname1', None,
                              signal=sig2, wait=wait2, done=done2)
        eventlet.sleep(0)
        # Wait for thread 2 to start.
        sig2.wait()

        wait2.send()
        eventlet.sleep(0)
        try:
            # Different fnames -> different locks, so thread 2 finished
            # even though thread 1 is still held up.
            self.assertTrue(done2.ready())
        finally:
            wait1.send()
            eventlet.sleep(0)

        # Wait on greenthreads to assert they didn't raise exceptions
        # during execution
        thr1.wait()
        thr2.wait()
class FakeVolumeDriver(object):
    """No-op driver satisfying the libvirt volume-driver interface."""

    def __init__(self, *args, **kwargs):
        # Accept and ignore any construction arguments.
        pass

    def attach_volume(self, *args):
        # Attaching is a no-op for the fake.
        pass

    def detach_volume(self, *args):
        # Detaching is a no-op for the fake.
        pass

    def get_xml(self, *args):
        # A real driver would return device XML; the fake has none.
        return ""
class FakeConfigGuestDisk(object):
    """Bare-bones stand-in for a LibvirtConfigGuestDisk object."""

    def __init__(self, *args, **kwargs):
        # Only the attributes the tests inspect are modelled; both start
        # unset, exactly like a freshly-created config object.
        self.source_type = self.driver_cache = None
class FakeConfigGuest(object):
    """Bare-bones stand-in for a LibvirtConfigGuest object."""

    def __init__(self, *args, **kwargs):
        # Only driver_cache is inspected by the tests that use this fake.
        self.driver_cache = None
class LibvirtConnTestCase(test.TestCase):
    def setUp(self):
        """Prepare a stubbed libvirt environment and a template instance.

        Replaces libvirt_utils with fakes, stubs out disk extension and
        the image service, and builds ``self.test_instance`` (an m1.small
        instance dict) that the individual tests feed to the driver.
        """
        super(LibvirtConnTestCase, self).setUp()
        self.flags(fake_call=True)
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.get_admin_context()
        self.flags(instances_path='')
        self.flags(libvirt_snapshots_directory='')
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt_utils',
            fake_libvirt_utils))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))

        def fake_extend(image, size):
            pass

        self.stubs.Set(libvirt_driver.disk, 'extend', fake_extend)

        # Capture flavor 5 (m1.small) metadata so the template instance
        # carries a realistic system_metadata blob.
        instance_type = db.instance_type_get(self.context, 5)
        sys_meta = instance_types.save_instance_type_info({}, instance_type)

        nova.tests.image.fake.stub_out_image_service(self.stubs)
        self.test_instance = {
            'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310',
            'memory_kb': '1024000',
            'basepath': '/some/path',
            'bridge_name': 'br100',
            'vcpus': 2,
            'project_id': 'fake',
            'bridge': 'br101',
            'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
            'root_gb': 10,
            'ephemeral_gb': 20,
            'instance_type_id': '5',  # m1.small
            'extra_specs': {},
            'system_metadata': sys_meta}
    def tearDown(self):
        """Reset the fake image service so state does not leak across tests."""
        nova.tests.image.fake.FakeImageService_reset()
        super(LibvirtConnTestCase, self).tearDown()
    def create_fake_libvirt_mock(self, **kwargs):
        """Replace LibvirtDriver._conn with a configurable fake connection.

        Any keyword argument is set as an attribute on the fake, letting a
        test graft exactly the libvirt calls it needs onto the connection.
        Also routes volume and VIF drivers through test fakes.
        """
        # A fake libvirt.virConnect
        class FakeLibvirtDriver(object):
            def defineXML(self, xml):
                return FakeVirtDomain()

        # Creating mocks
        volume_driver = 'iscsi=nova.tests.test_libvirt.FakeVolumeDriver'
        self.flags(libvirt_volume_drivers=[volume_driver])
        fake = FakeLibvirtDriver()
        # Customizing above fake if necessary
        for key, val in kwargs.items():
            fake.__setattr__(key, val)

        self.flags(libvirt_vif_driver="nova.tests.fake_network.FakeVIFDriver")
        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
        libvirt_driver.LibvirtDriver._conn = fake
def fake_lookup(self, instance_name):
return FakeVirtDomain()
def fake_execute(self, *args, **kwargs):
open(args[-1], "a").close()
def create_service(self, **kwargs):
service_ref = {'host': kwargs.get('host', 'dummy'),
'binary': 'nova-compute',
'topic': 'compute',
'report_count': 0}
return db.service_create(context.get_admin_context(), service_ref)
    def test_get_connector(self):
        """Connector dict must expose ip/initiator/host plus FC WWPNs/WWNNs."""
        initiator = 'fake.initiator.iqn'
        ip = 'fakeip'
        host = 'fakehost'
        wwpns = ['100010604b019419']
        wwnns = ['200010604b019419']
        self.flags(my_ip=ip)
        self.flags(host=host)

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        expected = {
            'ip': ip,
            'initiator': initiator,
            'host': host,
            'wwpns': wwpns,
            'wwnns': wwnns
        }
        volume = {
            'id': 'fake'
        }
        result = conn.get_volume_connector(volume)
        self.assertThat(expected, matchers.DictMatches(result))
def test_get_guest_config(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(cfg.acpi, True)
self.assertEquals(cfg.apic, True)
self.assertEquals(cfg.memory, 1024 * 1024 * 2)
self.assertEquals(cfg.vcpus, 1)
self.assertEquals(cfg.os_type, vm_mode.HVM)
self.assertEquals(cfg.os_boot_dev, "hd")
self.assertEquals(cfg.os_root, None)
self.assertEquals(len(cfg.devices), 7)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestInterface)
self.assertEquals(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[5]),
vconfig.LibvirtConfigGuestInput)
self.assertEquals(type(cfg.devices[6]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEquals(type(cfg.clock),
vconfig.LibvirtConfigGuestClock)
self.assertEquals(cfg.clock.offset, "utc")
self.assertEquals(len(cfg.clock.timers), 2)
self.assertEquals(type(cfg.clock.timers[0]),
vconfig.LibvirtConfigGuestTimer)
self.assertEquals(type(cfg.clock.timers[1]),
vconfig.LibvirtConfigGuestTimer)
self.assertEquals(cfg.clock.timers[0].name, "pit")
self.assertEquals(cfg.clock.timers[0].tickpolicy,
"delay")
self.assertEquals(cfg.clock.timers[1].name, "rtc")
self.assertEquals(cfg.clock.timers[1].tickpolicy,
"catchup")
def test_get_guest_config_with_two_nics(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 2),
None, disk_info)
self.assertEquals(cfg.acpi, True)
self.assertEquals(cfg.memory, 1024 * 1024 * 2)
self.assertEquals(cfg.vcpus, 1)
self.assertEquals(cfg.os_type, vm_mode.HVM)
self.assertEquals(cfg.os_boot_dev, "hd")
self.assertEquals(cfg.os_root, None)
self.assertEquals(len(cfg.devices), 8)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestInterface)
self.assertEquals(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestInterface)
self.assertEquals(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[5]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[6]),
vconfig.LibvirtConfigGuestInput)
self.assertEquals(type(cfg.devices[7]),
vconfig.LibvirtConfigGuestGraphics)
def test_get_guest_config_bug_1118829(self):
self.flags(libvirt_type='uml')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = {'disk_bus': 'virtio',
'cdrom_bus': 'ide',
'mapping': {u'vda': {'bus': 'virtio',
'type': 'disk',
'dev': u'vda'},
'root': {'bus': 'virtio',
'type': 'disk',
'dev': 'vda'}}}
# NOTE(jdg): For this specific test leave this blank
# This will exercise the failed code path still,
# and won't require fakes and stubs of the iscsi discovery
block_device_info = {}
cfg = conn.get_guest_config(instance_ref, [], None, disk_info,
None, block_device_info)
instance_ref = db.instance_get(self.context, instance_ref['id'])
self.assertEquals(instance_ref['root_device_name'], '/dev/vda')
def test_get_guest_config_with_root_device_name(self):
self.flags(libvirt_type='uml')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
block_device_info = {'root_device_name': '/dev/vdb'}
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref,
block_device_info)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info,
None, block_device_info)
self.assertEquals(cfg.acpi, False)
self.assertEquals(cfg.memory, 1024 * 1024 * 2)
self.assertEquals(cfg.vcpus, 1)
self.assertEquals(cfg.os_type, "uml")
self.assertEquals(cfg.os_boot_dev, None)
self.assertEquals(cfg.os_root, '/dev/vdb')
self.assertEquals(len(cfg.devices), 3)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestConsole)
def test_get_guest_config_with_block_device(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
conn_info = {'driver_volume_type': 'fake'}
info = {'block_device_mapping': [
{'connection_info': conn_info, 'mount_device': '/dev/vdc'},
{'connection_info': conn_info, 'mount_device': '/dev/vdd'}]}
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref, info)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info,
None, info)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(cfg.devices[2].target_dev, 'vdc')
self.assertEquals(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(cfg.devices[3].target_dev, 'vdd')
def test_get_guest_config_with_configdrive(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
# make configdrive.enabled_for() return True
instance_ref['config_drive'] = 'ANY_ID'
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(cfg.devices[2].target_dev, 'hdd')
def test_get_guest_config_with_vnc(self):
self.flags(libvirt_type='kvm',
vnc_enabled=True,
use_usb_tablet=False)
self.flags(enabled=False, group='spice')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(len(cfg.devices), 5)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEquals(cfg.devices[4].type, "vnc")
def test_get_guest_config_with_vnc_and_tablet(self):
self.flags(libvirt_type='kvm',
vnc_enabled=True,
use_usb_tablet=True)
self.flags(enabled=False, group='spice')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(len(cfg.devices), 6)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestInput)
self.assertEquals(type(cfg.devices[5]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEquals(cfg.devices[4].type, "tablet")
self.assertEquals(cfg.devices[5].type, "vnc")
def test_get_guest_config_with_spice_and_tablet(self):
self.flags(libvirt_type='kvm',
vnc_enabled=False,
use_usb_tablet=True)
self.flags(enabled=True,
agent_enabled=False,
group='spice')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(len(cfg.devices), 6)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestInput)
self.assertEquals(type(cfg.devices[5]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEquals(cfg.devices[4].type, "tablet")
self.assertEquals(cfg.devices[5].type, "spice")
def test_get_guest_config_with_spice_and_agent(self):
self.flags(libvirt_type='kvm',
vnc_enabled=False,
use_usb_tablet=True)
self.flags(enabled=True,
agent_enabled=True,
group='spice')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(len(cfg.devices), 6)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestChannel)
self.assertEquals(type(cfg.devices[5]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEquals(cfg.devices[4].target_name, "com.redhat.spice.0")
self.assertEquals(cfg.devices[5].type, "spice")
def test_get_guest_config_with_vnc_and_spice(self):
self.flags(libvirt_type='kvm',
vnc_enabled=True,
use_usb_tablet=True)
self.flags(enabled=True,
agent_enabled=True,
group='spice')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(len(cfg.devices), 8)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestInput)
self.assertEquals(type(cfg.devices[5]),
vconfig.LibvirtConfigGuestChannel)
self.assertEquals(type(cfg.devices[6]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEquals(type(cfg.devices[7]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEquals(cfg.devices[4].type, "tablet")
self.assertEquals(cfg.devices[5].target_name, "com.redhat.spice.0")
self.assertEquals(cfg.devices[6].type, "vnc")
self.assertEquals(cfg.devices[7].type, "spice")
def test_get_guest_cpu_config_none(self):
self.flags(libvirt_cpu_mode="none")
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(conf.cpu, None)
def test_get_guest_cpu_config_default_kvm(self):
self.flags(libvirt_type="kvm",
libvirt_cpu_mode=None)
def get_lib_version_stub(self):
return (0 * 1000 * 1000) + (9 * 1000) + 11
self.stubs.Set(libvirt.virConnect,
"getLibVersion",
get_lib_version_stub)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, "host-model")
self.assertEquals(conf.cpu.model, None)
def test_get_guest_cpu_config_default_uml(self):
self.flags(libvirt_type="uml",
libvirt_cpu_mode=None)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(conf.cpu, None)
def test_get_guest_cpu_config_default_lxc(self):
self.flags(libvirt_type="lxc",
libvirt_cpu_mode=None)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(conf.cpu, None)
def test_get_guest_cpu_config_host_passthrough_new(self):
def get_lib_version_stub(self):
return (0 * 1000 * 1000) + (9 * 1000) + 11
self.stubs.Set(libvirt.virConnect,
"getLibVersion",
get_lib_version_stub)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-passthrough")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, "host-passthrough")
self.assertEquals(conf.cpu.model, None)
def test_get_guest_cpu_config_host_model_new(self):
def get_lib_version_stub(self):
return (0 * 1000 * 1000) + (9 * 1000) + 11
self.stubs.Set(libvirt.virConnect,
"getLibVersion",
get_lib_version_stub)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-model")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, "host-model")
self.assertEquals(conf.cpu.model, None)
def test_get_guest_cpu_config_custom_new(self):
def get_lib_version_stub(self):
return (0 * 1000 * 1000) + (9 * 1000) + 11
self.stubs.Set(libvirt.virConnect,
"getLibVersion",
get_lib_version_stub)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="custom")
self.flags(libvirt_cpu_model="Penryn")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, "custom")
self.assertEquals(conf.cpu.model, "Penryn")
def test_get_guest_cpu_config_host_passthrough_old(self):
def get_lib_version_stub(self):
return (0 * 1000 * 1000) + (9 * 1000) + 7
self.stubs.Set(libvirt.virConnect, "getLibVersion",
get_lib_version_stub)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-passthrough")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
self.assertRaises(exception.NovaException,
conn.get_guest_config,
instance_ref,
_fake_network_info(self.stubs, 1),
None,
disk_info)
def test_get_guest_cpu_config_host_model_old(self):
def get_lib_version_stub(self):
return (0 * 1000 * 1000) + (9 * 1000) + 7
# Ensure we have a predictable host CPU
def get_host_capabilities_stub(self):
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.model = "Opteron_G4"
cpu.vendor = "AMD"
cpu.features.append(vconfig.LibvirtConfigGuestCPUFeature("tm2"))
cpu.features.append(vconfig.LibvirtConfigGuestCPUFeature("ht"))
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = cpu
return caps
self.stubs.Set(libvirt.virConnect,
"getLibVersion",
get_lib_version_stub)
self.stubs.Set(libvirt_driver.LibvirtDriver,
"get_host_capabilities",
get_host_capabilities_stub)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-model")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, None)
self.assertEquals(conf.cpu.model, "Opteron_G4")
self.assertEquals(conf.cpu.vendor, "AMD")
self.assertEquals(len(conf.cpu.features), 2)
self.assertEquals(conf.cpu.features[0].name, "tm2")
self.assertEquals(conf.cpu.features[1].name, "ht")
def test_get_guest_cpu_config_custom_old(self):
def get_lib_version_stub(self):
return (0 * 1000 * 1000) + (9 * 1000) + 7
self.stubs.Set(libvirt.virConnect,
"getLibVersion",
get_lib_version_stub)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="custom")
self.flags(libvirt_cpu_model="Penryn")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, None)
self.assertEquals(conf.cpu.model, "Penryn")
def test_xml_and_uri_no_ramdisk_no_kernel(self):
instance_data = dict(self.test_instance)
self._check_xml_and_uri(instance_data,
expect_kernel=False, expect_ramdisk=False)
def test_xml_and_uri_no_ramdisk_no_kernel_xen_hvm(self):
instance_data = dict(self.test_instance)
instance_data.update({'vm_mode': vm_mode.HVM})
self._check_xml_and_uri(instance_data, expect_kernel=False,
expect_ramdisk=False, expect_xen_hvm=True)
def test_xml_and_uri_no_ramdisk_no_kernel_xen_pv(self):
instance_data = dict(self.test_instance)
instance_data.update({'vm_mode': vm_mode.XEN})
self._check_xml_and_uri(instance_data, expect_kernel=False,
expect_ramdisk=False, expect_xen_hvm=False,
xen_only=True)
def test_xml_and_uri_no_ramdisk(self):
instance_data = dict(self.test_instance)
instance_data['kernel_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data,
expect_kernel=True, expect_ramdisk=False)
def test_xml_and_uri_no_kernel(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
self._check_xml_and_uri(instance_data,
expect_kernel=False, expect_ramdisk=False)
def test_xml_and_uri(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
instance_data['kernel_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data,
expect_kernel=True, expect_ramdisk=True)
def test_xml_and_uri_rescue(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
instance_data['kernel_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data, expect_kernel=True,
expect_ramdisk=True, rescue=instance_data)
def test_xml_and_uri_rescue_no_kernel_no_ramdisk(self):
instance_data = dict(self.test_instance)
self._check_xml_and_uri(instance_data, expect_kernel=False,
expect_ramdisk=False, rescue=instance_data)
def test_xml_and_uri_rescue_no_kernel(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data, expect_kernel=False,
expect_ramdisk=True, rescue=instance_data)
def test_xml_and_uri_rescue_no_ramdisk(self):
instance_data = dict(self.test_instance)
instance_data['kernel_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data, expect_kernel=True,
expect_ramdisk=False, rescue=instance_data)
def test_xml_uuid(self):
    """Domain XML for a raw image must carry the instance uuid."""
    self._check_xml_and_uuid({'disk_format': 'raw'})
def test_lxc_container_and_uri(self):
    """LXC guests produce container-style XML and an lxc:/// URI."""
    self._check_xml_and_container(dict(self.test_instance))
def test_xml_disk_prefix(self):
    """Default per-hypervisor disk device prefixes (vda/sda/ubda)."""
    self._check_xml_and_disk_prefix(dict(self.test_instance))
def test_xml_user_specified_disk_prefix(self):
    """libvirt_disk_prefix flag overrides the default device prefix."""
    self._check_xml_and_disk_prefix(dict(self.test_instance), 'sd')
def test_xml_disk_driver(self):
    """Disk driver cache mode follows O_DIRECT availability."""
    self._check_xml_and_disk_driver(dict(self.test_instance))
def test_xml_disk_bus_virtio(self):
    """Raw image boots from a virtio disk at vda."""
    expected = (('disk', 'virtio', 'vda'),)
    self._check_xml_and_disk_bus({'disk_format': 'raw'}, None, expected)
def test_xml_disk_bus_ide(self):
    """ISO image boots from an IDE cdrom at hda."""
    expected = (('cdrom', 'ide', 'hda'),)
    self._check_xml_and_disk_bus({'disk_format': 'iso'}, None, expected)
def test_xml_disk_bus_ide_and_virtio(self):
    """ISO root on IDE plus ephemeral and swap disks on virtio."""
    block_device_info = {
        'swap': {'device_name': '/dev/vdc',
                 'swap_size': 1},
        'ephemerals': [{'num': 0,
                        'virtual_name': 'ephemeral0',
                        'device_name': '/dev/vdb',
                        'size': 1}],
    }
    expected = (('cdrom', 'ide', 'hda'),
                ('disk', 'virtio', 'vdb'),
                ('disk', 'virtio', 'vdc'))
    self._check_xml_and_disk_bus({'disk_format': 'iso'},
                                 block_device_info,
                                 expected)
def test_list_instances(self):
    """list_instances() must skip domain ID 0 and list the rest."""
    # Stub the libvirt connection: two running domains (IDs 0 and 1),
    # no defined-but-stopped domains.
    self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
    libvirt_driver.LibvirtDriver._conn.lookupByID = self.fake_lookup
    libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 2
    libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: [0, 1]
    libvirt_driver.LibvirtDriver._conn.listDefinedDomains = lambda: []
    self.mox.ReplayAll()
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instances = conn.list_instances()
    # Only one should be listed, since domain with ID 0 must be skipped
    self.assertEquals(len(instances), 1)
def test_list_defined_instances(self):
    """Defined (shut-off) domains are included in list_instances()."""
    # One running domain with ID 0 (skipped) and one defined domain.
    self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
    libvirt_driver.LibvirtDriver._conn.lookupByID = self.fake_lookup
    libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 1
    libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: [0]
    libvirt_driver.LibvirtDriver._conn.listDefinedDomains = lambda: [1]
    self.mox.ReplayAll()
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instances = conn.list_instances()
    # Only one defined domain should be listed
    self.assertEquals(len(instances), 1)
def test_list_instances_when_instance_deleted(self):
    """A domain deleted between listDomainsID and lookupByID must not
    break list_instances(); it is simply omitted."""

    def fake_lookup(instance_name):
        # Simulate the race: the domain vanished before the lookup.
        raise libvirt.libvirtError("we deleted an instance!")

    self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
    libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup
    libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 1
    libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: [0, 1]
    libvirt_driver.LibvirtDriver._conn.listDefinedDomains = lambda: []
    self.mox.ReplayAll()
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instances = conn.list_instances()
    # None should be listed, since we fake deleted the last one
    self.assertEquals(len(instances), 0)
def test_get_all_block_devices(self):
    """get_all_block_devices() collects the <source dev=...> paths of
    block-type disks across all running domains, ignoring file-backed
    disks."""
    # One fixture per domain ID; lookupByID(id) serves xml[id].
    xml = [
        # NOTE(vish): id 0 is skipped
        None,
        """
        <domain type='kvm'>
            <devices>
                <disk type='file'>
                    <source file='filename'/>
                </disk>
                <disk type='block'>
                    <source dev='/path/to/dev/1'/>
                </disk>
            </devices>
        </domain>
        """,
        """
        <domain type='kvm'>
            <devices>
                <disk type='file'>
                    <source file='filename'/>
                </disk>
            </devices>
        </domain>
        """,
        """
        <domain type='kvm'>
            <devices>
                <disk type='file'>
                    <source file='filename'/>
                </disk>
                <disk type='block'>
                    <source dev='/path/to/dev/3'/>
                </disk>
            </devices>
        </domain>
        """,
    ]

    def fake_lookup(id):
        # Map each domain ID to a fake domain built from its XML.
        return FakeVirtDomain(xml[id])

    self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
    libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 4
    libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: range(4)
    libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup
    self.mox.ReplayAll()
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    devices = conn.get_all_block_devices()
    # Only the two block-type sources survive; file disks are ignored.
    self.assertEqual(devices, ['/path/to/dev/1', '/path/to/dev/3'])
def test_get_disks(self):
    """get_disks() returns the target dev names of every disk of one
    domain, looked up by instance name."""
    # One fixture per domain ID; lookupByID(id) serves xml[id].
    xml = [
        # NOTE(vish): id 0 is skipped
        None,
        """
        <domain type='kvm'>
            <devices>
                <disk type='file'>
                    <source file='filename'/>
                    <target dev='vda' bus='virtio'/>
                </disk>
                <disk type='block'>
                    <source dev='/path/to/dev/1'/>
                    <target dev='vdb' bus='virtio'/>
                </disk>
            </devices>
        </domain>
        """,
        """
        <domain type='kvm'>
            <devices>
                <disk type='file'>
                    <source file='filename'/>
                    <target dev='vda' bus='virtio'/>
                </disk>
            </devices>
        </domain>
        """,
        """
        <domain type='kvm'>
            <devices>
                <disk type='file'>
                    <source file='filename'/>
                    <target dev='vda' bus='virtio'/>
                </disk>
                <disk type='block'>
                    <source dev='/path/to/dev/3'/>
                    <target dev='vdb' bus='virtio'/>
                </disk>
            </devices>
        </domain>
        """,
    ]

    def fake_lookup(id):
        return FakeVirtDomain(xml[id])

    def fake_lookup_name(name):
        # Name-based lookup always resolves to the two-disk domain.
        return FakeVirtDomain(xml[1])

    self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
    libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 4
    libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: range(4)
    libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup
    libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
    libvirt_driver.LibvirtDriver._conn.listDefinedDomains = lambda: []
    self.mox.ReplayAll()
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    devices = conn.get_disks(conn.list_instances()[0])
    self.assertEqual(devices, ['vda', 'vdb'])
def test_snapshot_in_ami_format(self):
    """Snapshot of an AMI-based instance keeps the 'ami' disk format.

    Fix: the 'image_state' assertion was duplicated verbatim; the
    second copy is removed.
    """
    # The instance_update() calls snapshot() is expected to make,
    # in order: PENDING_UPLOAD, then UPLOADING.
    expected_calls = [
        {'args': (),
         'kwargs':
             {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
        {'args': (),
         'kwargs':
             {'task_state': task_states.IMAGE_UPLOADING,
              'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
    func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
    self.flags(libvirt_snapshots_directory='./')

    # Start test
    image_service = nova.tests.image.fake.FakeImageService()

    # Assign different image_ref from nova/images/fakes for testing ami
    test_instance = copy.deepcopy(self.test_instance)
    test_instance["image_ref"] = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'

    # Assuming that base image already exists in image_service
    instance_ref = db.instance_create(self.context, test_instance)
    properties = {'instance_id': instance_ref['id'],
                  'user_id': str(self.context.user_id)}
    snapshot_name = 'test-snap'
    sent_meta = {'name': snapshot_name, 'is_public': False,
                 'status': 'creating', 'properties': properties}
    # Create new image. It will be updated in snapshot method
    # To work with it from snapshot, the single image_service is needed
    # NOTE(review): passes the context *module*, not self.context; the
    # fake image service ignores it — confirm this is intended.
    recv_meta = image_service.create(context, sent_meta)

    self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
    libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
    self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
    libvirt_driver.utils.execute = self.fake_execute
    libvirt_driver.libvirt_utils.disk_type = "qcow2"
    self.mox.ReplayAll()

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    conn.snapshot(self.context, instance_ref, recv_meta['id'],
                  func_call_matcher.call)

    snapshot = image_service.show(context, recv_meta['id'])
    self.assertIsNone(func_call_matcher.match())
    self.assertEquals(snapshot['properties']['image_state'], 'available')
    self.assertEquals(snapshot['status'], 'active')
    self.assertEquals(snapshot['disk_format'], 'ami')
    self.assertEquals(snapshot['name'], snapshot_name)
def test_lxc_snapshot_in_ami_format(self):
    """Snapshot of an AMI-based lxc instance keeps the 'ami' format."""
    # The instance_update() calls snapshot() is expected to make,
    # in order: PENDING_UPLOAD, then UPLOADING.
    expected_calls = [
        {'args': (),
         'kwargs':
             {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
        {'args': (),
         'kwargs':
             {'task_state': task_states.IMAGE_UPLOADING,
              'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
    func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
    self.flags(libvirt_snapshots_directory='./',
               libvirt_type='lxc')

    # Start test
    image_service = nova.tests.image.fake.FakeImageService()

    # Assign different image_ref from nova/images/fakes for testing ami
    test_instance = copy.deepcopy(self.test_instance)
    test_instance["image_ref"] = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'

    # Assuming that base image already exists in image_service
    instance_ref = db.instance_create(self.context, test_instance)
    properties = {'instance_id': instance_ref['id'],
                  'user_id': str(self.context.user_id)}
    snapshot_name = 'test-snap'
    sent_meta = {'name': snapshot_name, 'is_public': False,
                 'status': 'creating', 'properties': properties}
    # Create new image. It will be updated in snapshot method
    # To work with it from snapshot, the single image_service is needed
    recv_meta = image_service.create(context, sent_meta)

    self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
    libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
    self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
    libvirt_driver.utils.execute = self.fake_execute
    libvirt_driver.libvirt_utils.disk_type = "qcow2"
    self.mox.ReplayAll()

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    conn.snapshot(self.context, instance_ref, recv_meta['id'],
                  func_call_matcher.call)

    snapshot = image_service.show(context, recv_meta['id'])
    self.assertIsNone(func_call_matcher.match())
    self.assertEquals(snapshot['properties']['image_state'], 'available')
    self.assertEquals(snapshot['status'], 'active')
    self.assertEquals(snapshot['disk_format'], 'ami')
    self.assertEquals(snapshot['name'], snapshot_name)
def test_snapshot_in_raw_format(self):
    """Snapshot of a raw-backed instance is uploaded as 'raw'."""
    # The instance_update() calls snapshot() is expected to make,
    # in order: PENDING_UPLOAD, then UPLOADING.
    expected_calls = [
        {'args': (),
         'kwargs':
             {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
        {'args': (),
         'kwargs':
             {'task_state': task_states.IMAGE_UPLOADING,
              'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
    func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
    self.flags(libvirt_snapshots_directory='./')

    # Start test
    image_service = nova.tests.image.fake.FakeImageService()

    # Assuming that base image already exists in image_service
    instance_ref = db.instance_create(self.context, self.test_instance)
    properties = {'instance_id': instance_ref['id'],
                  'user_id': str(self.context.user_id)}
    snapshot_name = 'test-snap'
    sent_meta = {'name': snapshot_name, 'is_public': False,
                 'status': 'creating', 'properties': properties}
    # Create new image. It will be updated in snapshot method
    # To work with it from snapshot, the single image_service is needed
    recv_meta = image_service.create(context, sent_meta)

    self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
    libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
    self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
    libvirt_driver.utils.execute = self.fake_execute
    self.stubs.Set(libvirt_driver.libvirt_utils, 'disk_type', 'raw')

    def convert_image(source, dest, out_format):
        # Fake qemu-img convert: just register the destination file.
        libvirt_driver.libvirt_utils.files[dest] = ''

    self.stubs.Set(images, 'convert_image', convert_image)
    self.mox.ReplayAll()

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    conn.snapshot(self.context, instance_ref, recv_meta['id'],
                  func_call_matcher.call)

    snapshot = image_service.show(context, recv_meta['id'])
    self.assertIsNone(func_call_matcher.match())
    self.assertEquals(snapshot['properties']['image_state'], 'available')
    self.assertEquals(snapshot['status'], 'active')
    self.assertEquals(snapshot['disk_format'], 'raw')
    self.assertEquals(snapshot['name'], snapshot_name)
def test_lxc_snapshot_in_raw_format(self):
    """Snapshot of a raw-backed lxc instance is uploaded as 'raw'."""
    # The instance_update() calls snapshot() is expected to make,
    # in order: PENDING_UPLOAD, then UPLOADING.
    expected_calls = [
        {'args': (),
         'kwargs':
             {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
        {'args': (),
         'kwargs':
             {'task_state': task_states.IMAGE_UPLOADING,
              'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
    func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
    self.flags(libvirt_snapshots_directory='./',
               libvirt_type='lxc')

    # Start test
    image_service = nova.tests.image.fake.FakeImageService()

    # Assuming that base image already exists in image_service
    instance_ref = db.instance_create(self.context, self.test_instance)
    properties = {'instance_id': instance_ref['id'],
                  'user_id': str(self.context.user_id)}
    snapshot_name = 'test-snap'
    sent_meta = {'name': snapshot_name, 'is_public': False,
                 'status': 'creating', 'properties': properties}
    # Create new image. It will be updated in snapshot method
    # To work with it from snapshot, the single image_service is needed
    recv_meta = image_service.create(context, sent_meta)

    self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
    libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
    self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
    libvirt_driver.utils.execute = self.fake_execute
    self.stubs.Set(libvirt_driver.libvirt_utils, 'disk_type', 'raw')

    def convert_image(source, dest, out_format):
        # Fake qemu-img convert: just register the destination file.
        libvirt_driver.libvirt_utils.files[dest] = ''

    self.stubs.Set(images, 'convert_image', convert_image)
    self.mox.ReplayAll()

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    conn.snapshot(self.context, instance_ref, recv_meta['id'],
                  func_call_matcher.call)

    snapshot = image_service.show(context, recv_meta['id'])
    self.assertIsNone(func_call_matcher.match())
    self.assertEquals(snapshot['properties']['image_state'], 'available')
    self.assertEquals(snapshot['status'], 'active')
    self.assertEquals(snapshot['disk_format'], 'raw')
    self.assertEquals(snapshot['name'], snapshot_name)
def test_snapshot_in_qcow2_format(self):
    """snapshot_image_format=qcow2 forces a qcow2 snapshot upload."""
    # The instance_update() calls snapshot() is expected to make,
    # in order: PENDING_UPLOAD, then UPLOADING.
    expected_calls = [
        {'args': (),
         'kwargs':
             {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
        {'args': (),
         'kwargs':
             {'task_state': task_states.IMAGE_UPLOADING,
              'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
    func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
    self.flags(snapshot_image_format='qcow2',
               libvirt_snapshots_directory='./')

    # Start test
    image_service = nova.tests.image.fake.FakeImageService()

    # Assuming that base image already exists in image_service
    instance_ref = db.instance_create(self.context, self.test_instance)
    properties = {'instance_id': instance_ref['id'],
                  'user_id': str(self.context.user_id)}
    snapshot_name = 'test-snap'
    sent_meta = {'name': snapshot_name, 'is_public': False,
                 'status': 'creating', 'properties': properties}
    # Create new image. It will be updated in snapshot method
    # To work with it from snapshot, the single image_service is needed
    recv_meta = image_service.create(context, sent_meta)

    self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
    libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
    self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
    libvirt_driver.utils.execute = self.fake_execute
    libvirt_driver.libvirt_utils.disk_type = "qcow2"
    self.mox.ReplayAll()

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    conn.snapshot(self.context, instance_ref, recv_meta['id'],
                  func_call_matcher.call)

    snapshot = image_service.show(context, recv_meta['id'])
    self.assertIsNone(func_call_matcher.match())
    self.assertEquals(snapshot['properties']['image_state'], 'available')
    self.assertEquals(snapshot['status'], 'active')
    self.assertEquals(snapshot['disk_format'], 'qcow2')
    self.assertEquals(snapshot['name'], snapshot_name)
def test_lxc_snapshot_in_qcow2_format(self):
    """snapshot_image_format=qcow2 with lxc yields a qcow2 snapshot."""
    # The instance_update() calls snapshot() is expected to make,
    # in order: PENDING_UPLOAD, then UPLOADING.
    expected_calls = [
        {'args': (),
         'kwargs':
             {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
        {'args': (),
         'kwargs':
             {'task_state': task_states.IMAGE_UPLOADING,
              'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
    func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
    self.flags(snapshot_image_format='qcow2',
               libvirt_snapshots_directory='./',
               libvirt_type='lxc')

    # Start test
    image_service = nova.tests.image.fake.FakeImageService()

    # Assuming that base image already exists in image_service
    instance_ref = db.instance_create(self.context, self.test_instance)
    properties = {'instance_id': instance_ref['id'],
                  'user_id': str(self.context.user_id)}
    snapshot_name = 'test-snap'
    sent_meta = {'name': snapshot_name, 'is_public': False,
                 'status': 'creating', 'properties': properties}
    # Create new image. It will be updated in snapshot method
    # To work with it from snapshot, the single image_service is needed
    recv_meta = image_service.create(context, sent_meta)

    self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
    libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
    self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
    libvirt_driver.utils.execute = self.fake_execute
    libvirt_driver.libvirt_utils.disk_type = "qcow2"
    self.mox.ReplayAll()

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    conn.snapshot(self.context, instance_ref, recv_meta['id'],
                  func_call_matcher.call)

    snapshot = image_service.show(context, recv_meta['id'])
    self.assertIsNone(func_call_matcher.match())
    self.assertEquals(snapshot['properties']['image_state'], 'available')
    self.assertEquals(snapshot['status'], 'active')
    self.assertEquals(snapshot['disk_format'], 'qcow2')
    self.assertEquals(snapshot['name'], snapshot_name)
def test_snapshot_no_image_architecture(self):
    """Snapshot succeeds for a base image with no architecture
    property."""
    # The instance_update() calls snapshot() is expected to make,
    # in order: PENDING_UPLOAD, then UPLOADING.
    expected_calls = [
        {'args': (),
         'kwargs':
             {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
        {'args': (),
         'kwargs':
             {'task_state': task_states.IMAGE_UPLOADING,
              'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
    func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
    self.flags(libvirt_snapshots_directory='./')

    # Start test
    image_service = nova.tests.image.fake.FakeImageService()

    # Assign different image_ref from nova/images/fakes for
    # testing different base image
    test_instance = copy.deepcopy(self.test_instance)
    test_instance["image_ref"] = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'

    # Assuming that base image already exists in image_service
    instance_ref = db.instance_create(self.context, test_instance)
    properties = {'instance_id': instance_ref['id'],
                  'user_id': str(self.context.user_id)}
    snapshot_name = 'test-snap'
    sent_meta = {'name': snapshot_name, 'is_public': False,
                 'status': 'creating', 'properties': properties}
    # Create new image. It will be updated in snapshot method
    # To work with it from snapshot, the single image_service is needed
    recv_meta = image_service.create(context, sent_meta)

    self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
    libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
    self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
    libvirt_driver.utils.execute = self.fake_execute
    self.mox.ReplayAll()

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    conn.snapshot(self.context, instance_ref, recv_meta['id'],
                  func_call_matcher.call)

    snapshot = image_service.show(context, recv_meta['id'])
    self.assertIsNone(func_call_matcher.match())
    self.assertEquals(snapshot['properties']['image_state'], 'available')
    self.assertEquals(snapshot['status'], 'active')
    self.assertEquals(snapshot['name'], snapshot_name)
def test_lxc_snapshot_no_image_architecture(self):
    """lxc snapshot succeeds for a base image with no architecture
    property."""
    # The instance_update() calls snapshot() is expected to make,
    # in order: PENDING_UPLOAD, then UPLOADING.
    expected_calls = [
        {'args': (),
         'kwargs':
             {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
        {'args': (),
         'kwargs':
             {'task_state': task_states.IMAGE_UPLOADING,
              'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
    func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
    self.flags(libvirt_snapshots_directory='./',
               libvirt_type='lxc')

    # Start test
    image_service = nova.tests.image.fake.FakeImageService()

    # Assign different image_ref from nova/images/fakes for
    # testing different base image
    test_instance = copy.deepcopy(self.test_instance)
    test_instance["image_ref"] = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'

    # Assuming that base image already exists in image_service
    instance_ref = db.instance_create(self.context, test_instance)
    properties = {'instance_id': instance_ref['id'],
                  'user_id': str(self.context.user_id)}
    snapshot_name = 'test-snap'
    sent_meta = {'name': snapshot_name, 'is_public': False,
                 'status': 'creating', 'properties': properties}
    # Create new image. It will be updated in snapshot method
    # To work with it from snapshot, the single image_service is needed
    recv_meta = image_service.create(context, sent_meta)

    self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
    libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
    self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
    libvirt_driver.utils.execute = self.fake_execute
    self.mox.ReplayAll()

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    conn.snapshot(self.context, instance_ref, recv_meta['id'],
                  func_call_matcher.call)

    snapshot = image_service.show(context, recv_meta['id'])
    self.assertIsNone(func_call_matcher.match())
    self.assertEquals(snapshot['properties']['image_state'], 'available')
    self.assertEquals(snapshot['status'], 'active')
    self.assertEquals(snapshot['name'], snapshot_name)
def test_snapshot_no_original_image(self):
    """Snapshot succeeds even when the instance's base image no longer
    exists in the image service."""
    # The instance_update() calls snapshot() is expected to make,
    # in order: PENDING_UPLOAD, then UPLOADING.
    expected_calls = [
        {'args': (),
         'kwargs':
             {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
        {'args': (),
         'kwargs':
             {'task_state': task_states.IMAGE_UPLOADING,
              'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
    func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
    self.flags(libvirt_snapshots_directory='./')

    # Start test
    image_service = nova.tests.image.fake.FakeImageService()

    # Assign a non-existent image
    test_instance = copy.deepcopy(self.test_instance)
    test_instance["image_ref"] = '661122aa-1234-dede-fefe-babababababa'

    instance_ref = db.instance_create(self.context, test_instance)
    properties = {'instance_id': instance_ref['id'],
                  'user_id': str(self.context.user_id)}
    snapshot_name = 'test-snap'
    sent_meta = {'name': snapshot_name, 'is_public': False,
                 'status': 'creating', 'properties': properties}
    recv_meta = image_service.create(context, sent_meta)

    self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
    libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
    self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
    libvirt_driver.utils.execute = self.fake_execute
    self.mox.ReplayAll()

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    conn.snapshot(self.context, instance_ref, recv_meta['id'],
                  func_call_matcher.call)

    snapshot = image_service.show(context, recv_meta['id'])
    self.assertIsNone(func_call_matcher.match())
    self.assertEquals(snapshot['properties']['image_state'], 'available')
    self.assertEquals(snapshot['status'], 'active')
    self.assertEquals(snapshot['name'], snapshot_name)
def test_lxc_snapshot_no_original_image(self):
    """lxc snapshot succeeds when the base image no longer exists.

    Fix: the driver was constructed as LibvirtDriver(False), missing
    the virtapi argument; every other test constructs it as
    LibvirtDriver(fake.FakeVirtAPI(), False).
    """
    # The instance_update() calls snapshot() is expected to make,
    # in order: PENDING_UPLOAD, then UPLOADING.
    expected_calls = [
        {'args': (),
         'kwargs':
             {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
        {'args': (),
         'kwargs':
             {'task_state': task_states.IMAGE_UPLOADING,
              'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
    func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
    self.flags(libvirt_snapshots_directory='./',
               libvirt_type='lxc')

    # Start test
    image_service = nova.tests.image.fake.FakeImageService()

    # Assign a non-existent image
    test_instance = copy.deepcopy(self.test_instance)
    test_instance["image_ref"] = '661122aa-1234-dede-fefe-babababababa'

    instance_ref = db.instance_create(self.context, test_instance)
    properties = {'instance_id': instance_ref['id'],
                  'user_id': str(self.context.user_id)}
    snapshot_name = 'test-snap'
    sent_meta = {'name': snapshot_name, 'is_public': False,
                 'status': 'creating', 'properties': properties}
    recv_meta = image_service.create(context, sent_meta)

    self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
    libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
    self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
    libvirt_driver.utils.execute = self.fake_execute
    self.mox.ReplayAll()

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    conn.snapshot(self.context, instance_ref, recv_meta['id'],
                  func_call_matcher.call)

    snapshot = image_service.show(context, recv_meta['id'])
    self.assertIsNone(func_call_matcher.match())
    self.assertEquals(snapshot['properties']['image_state'], 'available')
    self.assertEquals(snapshot['status'], 'active')
    self.assertEquals(snapshot['name'], snapshot_name)
def test_attach_invalid_volume_type(self):
    """attach_volume() with an unknown driver_volume_type raises
    VolumeDriverNotFound."""
    self.create_fake_libvirt_mock()
    libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
    self.mox.ReplayAll()
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(exception.VolumeDriverNotFound,
                      conn.attach_volume,
                      {"driver_volume_type": "badtype"},
                      {"name": "fake-instance"},
                      "/dev/sda")
def test_multi_nic(self):
    """An instance with two NICs yields two bridge-type <interface>
    elements in the domain XML."""
    inst_data = dict(self.test_instance)
    inst = db.instance_create(self.context, inst_data)
    nw_info = _fake_network_info(self.stubs, 2)
    driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt_type, inst)
    domain = etree.fromstring(driver.to_xml(inst, nw_info, disk_info))
    ifaces = domain.findall("./devices/interface")
    self.assertEquals(2, len(ifaces))
    self.assertEquals('bridge', ifaces[0].get('type'))
def _check_xml_and_container(self, instance):
    """Build an LXC guest for *instance* and verify its URI and the
    container-specific parts of the generated domain XML.

    Fix: the list of checks was named ``check`` and then shadowed by
    the loop target in ``for i, (check, ...) in enumerate(check)``,
    which only worked because enumerate() captured the list first.
    Renamed the list to ``checks``.
    """
    user_context = context.RequestContext(self.user_id,
                                          self.project_id)
    instance_ref = db.instance_create(user_context, instance)

    self.flags(libvirt_type='lxc')
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

    self.assertEquals(conn.uri(), 'lxc:///')

    network_info = _fake_network_info(self.stubs, 1)
    disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
                                        instance_ref)
    xml = conn.to_xml(instance_ref, network_info, disk_info)
    tree = etree.fromstring(xml)

    # (extractor, expected value) pairs for the container XML.
    checks = [
        (lambda t: t.find('.').get('type'), 'lxc'),
        (lambda t: t.find('./os/type').text, 'exe'),
        (lambda t: t.find('./devices/filesystem/target').get('dir'), '/')]

    for i, (check, expected_result) in enumerate(checks):
        self.assertEqual(check(tree),
                         expected_result,
                         '%s failed common check %d' % (xml, i))

    # The rootfs source directory must be populated.
    target = tree.find('./devices/filesystem/source').get('dir')
    self.assertTrue(len(target) > 0)
def _check_xml_and_disk_prefix(self, instance, prefix=None):
    """For each libvirt_type, verify the generated domain XML uses the
    expected disk device prefix (or *prefix* when explicitly set via
    the libvirt_disk_prefix flag)."""
    user_context = context.RequestContext(self.user_id,
                                          self.project_id)
    instance_ref = db.instance_create(user_context, instance)

    def _get_prefix(p, default):
        # User-specified prefix gets an 'a' suffix (first disk);
        # otherwise fall back to the hypervisor default.
        if p:
            return p + 'a'
        return default

    # Per-hypervisor (extractor, expected) checks: domain type and
    # first disk target dev.
    type_disk_map = {
        'qemu': [
            (lambda t: t.find('.').get('type'), 'qemu'),
            (lambda t: t.find('./devices/disk/target').get('dev'),
             _get_prefix(prefix, 'vda'))],
        'xen': [
            (lambda t: t.find('.').get('type'), 'xen'),
            (lambda t: t.find('./devices/disk/target').get('dev'),
             _get_prefix(prefix, 'sda'))],
        'kvm': [
            (lambda t: t.find('.').get('type'), 'kvm'),
            (lambda t: t.find('./devices/disk/target').get('dev'),
             _get_prefix(prefix, 'vda'))],
        'uml': [
            (lambda t: t.find('.').get('type'), 'uml'),
            (lambda t: t.find('./devices/disk/target').get('dev'),
             _get_prefix(prefix, 'ubda'))]
    }

    for (libvirt_type, checks) in type_disk_map.iteritems():
        self.flags(libvirt_type=libvirt_type)
        if prefix:
            self.flags(libvirt_disk_prefix=prefix)
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        network_info = _fake_network_info(self.stubs, 1)
        disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
                                            instance_ref)
        xml = conn.to_xml(instance_ref, network_info, disk_info)
        tree = etree.fromstring(xml)

        for i, (check, expected_result) in enumerate(checks):
            self.assertEqual(check(tree),
                             expected_result,
                             '%s != %s failed check %d' %
                             (check(tree), expected_result, i))
def _check_xml_and_disk_driver(self, image_meta):
    """Disk driver cache mode must be 'none' when O_DIRECT is
    supported and 'writethrough' when it is not."""
    os_open = os.open
    directio_supported = True

    def os_open_stub(path, flags, *args, **kwargs):
        # Emulate a filesystem with/without O_DIRECT support, then
        # delegate to the real os.open with the flag stripped.
        if flags & os.O_DIRECT:
            if not directio_supported:
                raise OSError(errno.EINVAL,
                              '%s: %s' % (os.strerror(errno.EINVAL), path))
            flags &= ~os.O_DIRECT
        return os_open(path, flags, *args, **kwargs)

    self.stubs.Set(os, 'open', os_open_stub)

    def connection_supports_direct_io_stub(*args, **kwargs):
        return directio_supported

    self.stubs.Set(libvirt_driver.LibvirtDriver,
                   '_supports_direct_io', connection_supports_direct_io_stub)

    user_context = context.RequestContext(self.user_id, self.project_id)
    instance_ref = db.instance_create(user_context, self.test_instance)
    network_info = _fake_network_info(self.stubs, 1)

    # O_DIRECT available -> cache='none' on every disk driver.
    drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
                                        instance_ref)
    xml = drv.to_xml(instance_ref, network_info, disk_info, image_meta)
    tree = etree.fromstring(xml)
    disks = tree.findall('./devices/disk/driver')
    for disk in disks:
        self.assertEqual(disk.get("cache"), "none")

    directio_supported = False

    # The O_DIRECT availability is cached on first use in
    # LibvirtDriver, hence we re-create it here
    drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
                                        instance_ref)
    xml = drv.to_xml(instance_ref, network_info, disk_info, image_meta)
    tree = etree.fromstring(xml)
    disks = tree.findall('./devices/disk/driver')
    for disk in disks:
        self.assertEqual(disk.get("cache"), "writethrough")
def _check_xml_and_disk_bus(self, image_meta,
                            block_device_info, wantConfig):
    """Generate domain XML and verify each disk against *wantConfig*,
    a sequence of (device_type, bus, target_dev) tuples in order.

    Improvement: the manual ``for i in range(len(wantConfig))`` index
    loop is replaced with ``enumerate`` plus tuple unpacking; behavior
    (including an IndexError when fewer disks were generated than
    expected) is unchanged.
    """
    user_context = context.RequestContext(self.user_id, self.project_id)
    instance_ref = db.instance_create(user_context, self.test_instance)

    network_info = _fake_network_info(self.stubs, 1)

    drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
                                        instance_ref,
                                        block_device_info,
                                        image_meta)
    xml = drv.to_xml(instance_ref, network_info, disk_info, image_meta,
                     block_device_info=block_device_info)
    tree = etree.fromstring(xml)

    got_disks = tree.findall('./devices/disk')
    got_disk_targets = tree.findall('./devices/disk/target')
    for i, (want_device_type, want_bus, want_dev) in enumerate(wantConfig):
        self.assertEqual(got_disks[i].get('device'), want_device_type)
        self.assertEqual(got_disk_targets[i].get('bus'), want_bus)
        self.assertEqual(got_disk_targets[i].get('dev'), want_dev)
def _check_xml_and_uuid(self, image_meta):
    """The generated domain XML must carry the instance's uuid in its
    <uuid> element."""
    ctxt = context.RequestContext(self.user_id, self.project_id)
    inst = db.instance_create(ctxt, self.test_instance)
    nw_info = _fake_network_info(self.stubs, 1)
    driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt_type, inst)
    domain = etree.fromstring(
        driver.to_xml(inst, nw_info, disk_info, image_meta))
    self.assertEqual(domain.find('./uuid').text, inst['uuid'])
    def _check_xml_and_uri(self, instance, expect_ramdisk, expect_kernel,
                           rescue=None, expect_xen_hvm=False, xen_only=False):
        """Generate guest XML per libvirt_type and verify URI and contents.

        For each hypervisor type under test, builds a list of
        (xml-extractor-callable, expected-value) checks, renders the domain
        XML, and asserts every check plus a set of common checks.  Finally
        verifies that an explicitly configured libvirt_uri is honoured
        unchanged for every libvirt_type.
        """
        user_context = context.RequestContext(self.user_id, self.project_id)
        instance_ref = db.instance_create(user_context, instance)
        # NOTE(review): network_ref is not referenced below -- presumably
        # kept for the project_get_networks lookup itself; confirm before
        # removing.
        network_ref = db.project_get_networks(context.get_admin_context(),
                                             self.project_id)[0]
        # Map: libvirt_type -> (expected connection URI, list of
        # (extractor, expected) checks run against the parsed XML tree).
        type_uri_map = {'qemu': ('qemu:///system',
                             [(lambda t: t.find('.').get('type'), 'qemu'),
                              (lambda t: t.find('./os/type').text,
                               vm_mode.HVM),
                              (lambda t: t.find('./devices/emulator'), None)]),
                        'kvm': ('qemu:///system',
                             [(lambda t: t.find('.').get('type'), 'kvm'),
                              (lambda t: t.find('./os/type').text,
                               vm_mode.HVM),
                              (lambda t: t.find('./devices/emulator'), None)]),
                        'uml': ('uml:///system',
                             [(lambda t: t.find('.').get('type'), 'uml'),
                              (lambda t: t.find('./os/type').text,
                               vm_mode.UML)]),
                        'xen': ('xen:///',
                             [(lambda t: t.find('.').get('type'), 'xen'),
                              (lambda t: t.find('./os/type').text,
                               vm_mode.XEN)])}
        if expect_xen_hvm or xen_only:
            hypervisors_to_check = ['xen']
        else:
            hypervisors_to_check = ['qemu', 'kvm', 'xen']
        # For Xen-HVM the map is rebuilt so only the HVM variant is checked.
        if expect_xen_hvm:
            type_uri_map = {}
            type_uri_map['xen'] = ('xen:///',
                                   [(lambda t: t.find('.').get('type'),
                                       'xen'),
                                    (lambda t: t.find('./os/type').text,
                                     vm_mode.HVM)])
        for hypervisor_type in hypervisors_to_check:
            check_list = type_uri_map[hypervisor_type][1]
            # Rescue boots use files suffixed with '.rescue'.
            if rescue:
                suffix = '.rescue'
            else:
                suffix = ''
            if expect_kernel:
                check = (lambda t: t.find('./os/kernel').text.split(
                    '/')[1], 'kernel' + suffix)
            else:
                check = (lambda t: t.find('./os/kernel'), None)
            check_list.append(check)
            # Hypervisors that only support vm_mode.HVM should
            # not produce configuration that results in kernel
            # arguments
            if not expect_kernel and hypervisor_type in ['qemu', 'kvm']:
                check = (lambda t: t.find('./os/root'), None)
                check_list.append(check)
                check = (lambda t: t.find('./os/cmdline'), None)
                check_list.append(check)
            if expect_ramdisk:
                check = (lambda t: t.find('./os/initrd').text.split(
                    '/')[1], 'ramdisk' + suffix)
            else:
                check = (lambda t: t.find('./os/initrd'), None)
            check_list.append(check)
            # qemu/kvm guests carry SMBIOS sysinfo entries; verify each
            # name/value pair in document order.
            if hypervisor_type in ['qemu', 'kvm']:
                xpath = "./sysinfo/system/entry"
                check = (lambda t: t.findall(xpath)[0].get("name"),
                         "manufacturer")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[0].text,
                         version.vendor_string())
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[1].get("name"),
                         "product")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[1].text,
                         version.product_string())
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[2].get("name"),
                         "version")
                check_list.append(check)
                # NOTE(sirp): empty strings don't roundtrip in lxml (they are
                # converted to None), so we need an `or ''` to correct for that
                check = (lambda t: t.findall(xpath)[2].text or '',
                         version.version_string_with_package())
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[3].get("name"),
                         "serial")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[3].text,
                         "cef19ce0-0ca2-11df-855d-b19fbce37686")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[4].get("name"),
                         "uuid")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[4].text,
                         instance['uuid'])
                check_list.append(check)
            # qemu/kvm get a file-backed serial console plus a pty; others
            # get a pty console element only.
            if hypervisor_type in ['qemu', 'kvm']:
                check = (lambda t: t.findall('./devices/serial')[0].get(
                        'type'), 'file')
                check_list.append(check)
                check = (lambda t: t.findall('./devices/serial')[1].get(
                        'type'), 'pty')
                check_list.append(check)
                check = (lambda t: t.findall('./devices/serial/source')[0].get(
                        'path').split('/')[1], 'console.log')
                check_list.append(check)
            else:
                check = (lambda t: t.find('./devices/console').get(
                        'type'), 'pty')
                check_list.append(check)
        # Checks applied to every hypervisor type.
        common_checks = [
            (lambda t: t.find('.').tag, 'domain'),
            (lambda t: t.find('./memory').text, '2097152')]
        if rescue:
            common_checks += [
                (lambda t: t.findall('./devices/disk/source')[0].get(
                    'file').split('/')[1], 'disk.rescue'),
                (lambda t: t.findall('./devices/disk/source')[1].get(
                    'file').split('/')[1], 'disk')]
        else:
            common_checks += [(lambda t: t.findall(
                './devices/disk/source')[0].get('file').split('/')[1],
                               'disk')]
            common_checks += [(lambda t: t.findall(
                './devices/disk/source')[1].get('file').split('/')[1],
                               'disk.local')]
        for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
            self.flags(libvirt_type=libvirt_type)
            conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
            self.assertEquals(conn.uri(), expected_uri)
            network_info = _fake_network_info(self.stubs, 1)
            disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
                                                instance_ref,
                                                rescue=rescue)
            xml = conn.to_xml(instance_ref, network_info, disk_info,
                              rescue=rescue)
            tree = etree.fromstring(xml)
            for i, (check, expected_result) in enumerate(checks):
                self.assertEqual(check(tree),
                                 expected_result,
                                 '%s != %s failed check %d' %
                                 (check(tree), expected_result, i))
            for i, (check, expected_result) in enumerate(common_checks):
                self.assertEqual(check(tree),
                                 expected_result,
                                 '%s != %s failed common check %d' %
                                 (check(tree), expected_result, i))
            # The interface's filterref must name the per-instance
            # NWFilter built by the firewall driver.
            filterref = './devices/interface/filterref'
            (network, mapping) = network_info[0]
            nic_id = mapping['mac'].replace(':', '')
            fw = firewall.NWFilterFirewall(fake.FakeVirtAPI(), conn)
            instance_filter_name = fw._instance_filter_name(instance_ref,
                                                            nic_id)
            self.assertEqual(tree.find(filterref).get('filter'),
                             instance_filter_name)
        # This test is supposed to make sure we don't
        # override a specifically set uri
        #
        # Deliberately not just assigning this string to CONF.libvirt_uri and
        # checking against that later on. This way we make sure the
        # implementation doesn't fiddle around with the CONF.
        testuri = 'something completely different'
        self.flags(libvirt_uri=testuri)
        for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
            self.flags(libvirt_type=libvirt_type)
            conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
            self.assertEquals(conn.uri(), testuri)
        db.instance_destroy(user_context, instance_ref['uuid'])
    def test_ensure_filtering_rules_for_instance_timeout(self):
        # ensure_filtering_rules_for_instance() finishes with timeout:
        # instance_filter_exists is stubbed to return a falsy value forever,
        # so the driver keeps retrying via the injected fake timer until it
        # gives up and raises NovaException.
        # Preparing mocks
        def fake_none(self, *args):
            return
        def fake_raise(self):
            raise libvirt.libvirtError('ERR')
        # Stand-in for the time module: accumulates requested sleep time
        # instead of actually sleeping.
        class FakeTime(object):
            def __init__(self):
                self.counter = 0
            def sleep(self, t):
                self.counter += t
        fake_timer = FakeTime()
        # _fake_network_info must be called before create_fake_libvirt_mock(),
        # as _fake_network_info calls importutils.import_class() and
        # create_fake_libvirt_mock() mocks importutils.import_class().
        network_info = _fake_network_info(self.stubs, 1)
        self.create_fake_libvirt_mock()
        instance_ref = db.instance_create(self.context, self.test_instance)
        # Start test
        self.mox.ReplayAll()
        try:
            conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            self.stubs.Set(conn.firewall_driver,
                           'setup_basic_filtering',
                           fake_none)
            self.stubs.Set(conn.firewall_driver,
                           'prepare_instance_filter',
                           fake_none)
            self.stubs.Set(conn.firewall_driver,
                           'instance_filter_exists',
                           fake_none)
            conn.ensure_filtering_rules_for_instance(instance_ref,
                                                     network_info,
                                                     time_module=fake_timer)
        except exception.NovaException, e:
            # NOTE(review): these assertions only run when the expected
            # exception is actually raised; if it is not, only the sleep
            # counter check below guards the test.
            msg = ('The firewall filter for %s does not exist' %
                   instance_ref['name'])
            c1 = (0 <= str(e).find(msg))
            self.assertTrue(c1)
        # Total sleep recorded by the fake timer must match the driver's
        # retry schedule before it times out.
        self.assertEqual(29, fake_timer.counter, "Didn't wait the expected "
                                                 "amount of time")
        db.instance_destroy(self.context, instance_ref['uuid'])
def test_check_can_live_migrate_dest_all_pass_with_block_migration(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest = "fake_host_2"
src = instance_ref['host']
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'disk_available_least': 400,
'cpu_info': 'asdf',
}
filename = "file"
self.mox.StubOutWithMock(conn, '_create_shared_storage_test_file')
self.mox.StubOutWithMock(conn, '_compare_cpu')
# _check_cpu_match
conn._compare_cpu("asdf")
# mounted_on_same_shared_storage
conn._create_shared_storage_test_file().AndReturn(filename)
self.mox.ReplayAll()
return_value = conn.check_can_live_migrate_destination(self.context,
instance_ref, compute_info, compute_info, True)
self.assertThat({"filename": "file",
'disk_available_mb': 409600,
"disk_over_commit": False,
"block_migration": True},
matchers.DictMatches(return_value))
def test_check_can_live_migrate_dest_all_pass_no_block_migration(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest = "fake_host_2"
src = instance_ref['host']
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'cpu_info': 'asdf'}
filename = "file"
self.mox.StubOutWithMock(conn, '_create_shared_storage_test_file')
self.mox.StubOutWithMock(conn, '_compare_cpu')
# _check_cpu_match
conn._compare_cpu("asdf")
# mounted_on_same_shared_storage
conn._create_shared_storage_test_file().AndReturn(filename)
self.mox.ReplayAll()
return_value = conn.check_can_live_migrate_destination(self.context,
instance_ref, compute_info, compute_info, False)
self.assertThat({"filename": "file",
"block_migration": False,
"disk_over_commit": False,
"disk_available_mb": None},
matchers.DictMatches(return_value))
def test_check_can_live_migrate_dest_incompatible_cpu_raises(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest = "fake_host_2"
src = instance_ref['host']
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'cpu_info': 'asdf'}
self.mox.StubOutWithMock(conn, '_compare_cpu')
conn._compare_cpu("asdf").AndRaise(exception.InvalidCPUInfo(
reason='foo')
)
self.mox.ReplayAll()
self.assertRaises(exception.InvalidCPUInfo,
conn.check_can_live_migrate_destination,
self.context, instance_ref,
compute_info, compute_info, False)
def test_check_can_live_migrate_dest_cleanup_works_correctly(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest_check_data = {"filename": "file",
"block_migration": True,
"disk_over_commit": False,
"disk_available_mb": 1024}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, '_cleanup_shared_storage_test_file')
conn._cleanup_shared_storage_test_file("file")
self.mox.ReplayAll()
conn.check_can_live_migrate_destination_cleanup(self.context,
dest_check_data)
def test_check_can_live_migrate_source_works_correctly(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest_check_data = {"filename": "file",
"block_migration": True,
"disk_over_commit": False,
"disk_available_mb": 1024}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(False)
self.mox.StubOutWithMock(conn, "_assert_dest_node_has_enough_disk")
conn._assert_dest_node_has_enough_disk(
self.context, instance_ref, dest_check_data['disk_available_mb'],
False)
self.mox.ReplayAll()
conn.check_can_live_migrate_source(self.context, instance_ref,
dest_check_data)
def test_check_can_live_migrate_source_vol_backed_works_correctly(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest_check_data = {"filename": "file",
"block_migration": False,
"disk_over_commit": False,
"disk_available_mb": 1024,
"is_volume_backed": True}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(False)
self.mox.ReplayAll()
ret = conn.check_can_live_migrate_source(self.context, instance_ref,
dest_check_data)
self.assertTrue(type(ret) == dict)
self.assertTrue('is_shared_storage' in ret)
def test_check_can_live_migrate_source_vol_backed_fails(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest_check_data = {"filename": "file",
"block_migration": False,
"disk_over_commit": False,
"disk_available_mb": 1024,
"is_volume_backed": False}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(False)
self.mox.ReplayAll()
self.assertRaises(exception.InvalidSharedStorage,
conn.check_can_live_migrate_source, self.context,
instance_ref, dest_check_data)
def test_check_can_live_migrate_dest_fail_shared_storage_with_blockm(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest_check_data = {"filename": "file",
"block_migration": True,
"disk_over_commit": False,
'disk_available_mb': 1024}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(True)
self.mox.ReplayAll()
self.assertRaises(exception.InvalidLocalStorage,
conn.check_can_live_migrate_source,
self.context, instance_ref, dest_check_data)
def test_check_can_live_migrate_no_shared_storage_no_blck_mig_raises(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest_check_data = {"filename": "file",
"block_migration": False,
"disk_over_commit": False,
'disk_available_mb': 1024}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(False)
self.mox.ReplayAll()
self.assertRaises(exception.InvalidSharedStorage,
conn.check_can_live_migrate_source,
self.context, instance_ref, dest_check_data)
def test_check_can_live_migrate_source_with_dest_not_enough_disk(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest = "fake_host_2"
src = instance_ref['host']
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(False)
self.mox.StubOutWithMock(conn, "get_instance_disk_info")
conn.get_instance_disk_info(instance_ref["name"]).AndReturn(
'[{"virt_disk_size":2}]')
dest_check_data = {"filename": "file",
"disk_available_mb": 0,
"block_migration": True,
"disk_over_commit": False}
self.mox.ReplayAll()
self.assertRaises(exception.MigrationError,
conn.check_can_live_migrate_source,
self.context, instance_ref, dest_check_data)
    def test_live_migration_raises_exception(self):
        # Confirms recover method is called when exceptions are raised.
        # Preparing data
        self.compute = importutils.import_object(CONF.compute_manager)
        instance_dict = {'host': 'fake',
                         'power_state': power_state.RUNNING,
                         'vm_state': vm_states.ACTIVE}
        instance_ref = db.instance_create(self.context, self.test_instance)
        instance_ref = db.instance_update(self.context, instance_ref['uuid'],
                                          instance_dict)
        # Preparing mocks: the libvirt domain's migrateToURI is set up to
        # raise, which should trigger the rollback callback below.
        vdmock = self.mox.CreateMock(libvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "migrateToURI")
        _bandwidth = CONF.live_migration_bandwidth
        vdmock.migrateToURI(CONF.live_migration_uri % 'dest',
                            mox.IgnoreArg(),
                            None,
                            _bandwidth).AndRaise(libvirt.libvirtError('ERR'))
        def fake_lookup(instance_name):
            if instance_name == instance_ref['name']:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)
        # The rollback must be invoked exactly once with these arguments.
        self.mox.StubOutWithMock(self.compute, "_rollback_live_migration")
        self.compute._rollback_live_migration(self.context, instance_ref,
                                              'dest', False)
        #start test
        self.mox.ReplayAll()
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(libvirt.libvirtError,
                      conn._live_migration,
                      self.context, instance_ref, 'dest', False,
                      self.compute._rollback_live_migration)
        # The instance must remain ACTIVE/RUNNING after the failed attempt.
        instance_ref = db.instance_get(self.context, instance_ref['id'])
        self.assertTrue(instance_ref['vm_state'] == vm_states.ACTIVE)
        self.assertTrue(instance_ref['power_state'] == power_state.RUNNING)
        db.instance_destroy(self.context, instance_ref['uuid'])
    def test_pre_live_migration_works_correctly_mocked(self):
        # pre_live_migration must connect every mapped volume and plug the
        # instance's VIFs, then return None.
        # Creating testdata
        vol = {'block_device_mapping': [
                  {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
                  {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        class FakeNetworkInfo():
            def fixed_ips(self):
                return ["test_ip_addr"]
        inst_ref = {'id': 'foo'}
        c = context.get_admin_context()
        nw_info = FakeNetworkInfo()
        # Creating mocks
        self.mox.StubOutWithMock(driver, "block_device_info_get_mapping")
        driver.block_device_info_get_mapping(vol
            ).AndReturn(vol['block_device_mapping'])
        self.mox.StubOutWithMock(conn, "volume_driver_method")
        # Expect one connect_volume call per block device, with the device
        # name derived from the mount_device path (e.g. '/dev/sda' -> 'sda').
        for v in vol['block_device_mapping']:
            disk_info = {
                'bus': "scsi",
                'dev': v['mount_device'].rpartition("/")[2],
                'type': "disk"
                }
            conn.volume_driver_method('connect_volume',
                                      v['connection_info'],
                                      disk_info)
        self.mox.StubOutWithMock(conn, 'plug_vifs')
        conn.plug_vifs(mox.IsA(inst_ref), nw_info)
        self.mox.ReplayAll()
        result = conn.pre_live_migration(c, inst_ref, vol, nw_info)
        self.assertEqual(result, None)
    def test_pre_live_migration_vol_backed_works_correctly_mocked(self):
        # For a volume-backed migration without shared storage,
        # pre_live_migration must also create the instance directory on
        # the destination.
        # Creating testdata, using temp dir.
        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            vol = {'block_device_mapping': [
                  {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
                  {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
            conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            class FakeNetworkInfo():
                def fixed_ips(self):
                    return ["test_ip_addr"]
            inst_ref = db.instance_create(self.context, self.test_instance)
            c = context.get_admin_context()
            nw_info = FakeNetworkInfo()
            # Creating mocks
            self.mox.StubOutWithMock(conn, "volume_driver_method")
            # One connect_volume call per block device; the device name is
            # the mount_device basename (e.g. '/dev/sda' -> 'sda').
            for v in vol['block_device_mapping']:
                disk_info = {
                    'bus': "scsi",
                    'dev': v['mount_device'].rpartition("/")[2],
                    'type': "disk"
                    }
                conn.volume_driver_method('connect_volume',
                                          v['connection_info'],
                                          disk_info)
            self.mox.StubOutWithMock(conn, 'plug_vifs')
            conn.plug_vifs(mox.IsA(inst_ref), nw_info)
            self.mox.ReplayAll()
            migrate_data = {'is_shared_storage': False,
                            'is_volume_backed': True,
                            'block_migration': False,
                            'instance_relative_path': inst_ref['name']
                            }
            ret = conn.pre_live_migration(c, inst_ref, vol, nw_info,
                                          migrate_data)
            self.assertEqual(ret, None)
            # The instance directory must now exist under instances_path.
            self.assertTrue(os.path.exists('%s/%s/' % (tmpdir,
                                                       inst_ref['name'])))
        db.instance_destroy(self.context, inst_ref['uuid'])
    def test_pre_block_migration_works_correctly(self):
        # pre_block_migration must create the instance directory and fetch
        # (via the image cache) any disk that has a backing file.
        # Replace instances_path since this testcase creates tmpfile
        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)

            # Test data: one raw disk with no backing file (nothing to
            # cache) and one qcow2-style disk with a backing file.
            instance_ref = db.instance_create(self.context, self.test_instance)
            dummy_info = [{'path': '%s/disk' % tmpdir,
                           'disk_size': 10737418240,
                           'type': 'raw',
                           'backing_file': ''},
                          {'backing_file': 'otherdisk_1234567',
                           'path': '%s/otherdisk' % tmpdir,
                           'virt_disk_size': 10737418240}]
            dummyjson = json.dumps(dummy_info)

            # qemu-img should be mockd since test environment might not have
            # large disk space.
            # Only the disk with a backing file is expected to be cached.
            self.mox.StubOutWithMock(imagebackend.Image, 'cache')
            imagebackend.Image.cache(context=mox.IgnoreArg(),
                                     fetch_func=mox.IgnoreArg(),
                                     filename='otherdisk',
                                     image_id=self.test_instance['image_ref'],
                                     project_id='fake',
                                     size=10737418240L,
                                     user_id=None).AndReturn(None)

            self.mox.ReplayAll()

            conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            conn.pre_block_migration(self.context, instance_ref,
                                     dummyjson)

            # The per-instance directory must have been created.
            self.assertTrue(os.path.exists('%s/%s/' %
                                           (tmpdir, instance_ref['uuid'])))

        db.instance_destroy(self.context, instance_ref['uuid'])
    def test_get_instance_disk_info_works_correctly(self):
        # get_instance_disk_info must report size/type/backing-file data
        # for each file-backed disk in the domain XML.
        # Test data
        instance_ref = db.instance_create(self.context, self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")

        # Preparing mocks
        vdmock = self.mox.CreateMock(libvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance_ref['name']:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        GB = 1024 * 1024 * 1024
        fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * GB
        fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * GB
        fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'

        self.mox.StubOutWithMock(os.path, "getsize")
        os.path.getsize('/test/disk').AndReturn((10737418240))
        os.path.getsize('/test/disk.local').AndReturn((3328599655))

        # Canned qemu-img output for the qcow2 disk: 20G virtual size,
        # 3.1G on disk -- the difference is the over-committed size.
        ret = ("image: /test/disk\n"
               "file format: raw\n"
               "virtual size: 20G (21474836480 bytes)\n"
               "disk size: 3.1G\n"
               "cluster_size: 2097152\n"
               "backing file: /test/dummy (actual path: /backing/file)\n")

        self.mox.StubOutWithMock(os.path, "exists")
        os.path.exists('/test/disk.local').AndReturn(True)

        self.mox.StubOutWithMock(utils, "execute")
        utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
                      '/test/disk.local').AndReturn((ret, ''))

        self.mox.ReplayAll()
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        info = conn.get_instance_disk_info(instance_ref['name'])
        info = jsonutils.loads(info)
        self.assertEquals(info[0]['type'], 'raw')
        self.assertEquals(info[0]['path'], '/test/disk')
        self.assertEquals(info[0]['disk_size'], 10737418240)
        self.assertEquals(info[0]['backing_file'], "")
        self.assertEquals(info[0]['over_committed_disk_size'], 0)
        self.assertEquals(info[1]['type'], 'qcow2')
        self.assertEquals(info[1]['path'], '/test/disk.local')
        self.assertEquals(info[1]['virt_disk_size'], 21474836480)
        self.assertEquals(info[1]['backing_file'], "file")
        self.assertEquals(info[1]['over_committed_disk_size'], 18146236825)

        db.instance_destroy(self.context, instance_ref['uuid'])
    def test_get_instance_disk_info_excludes_volumes(self):
        # Disks that correspond to attached volumes (vdc/vdd below) must be
        # excluded from get_instance_disk_info's output.
        # Test data
        instance_ref = db.instance_create(self.context, self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/fake/path/to/volume1'/>"
                    "<target dev='vdc' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/fake/path/to/volume2'/>"
                    "<target dev='vdd' bus='virtio'/></disk>"
                    "</devices></domain>")

        # Preparing mocks
        vdmock = self.mox.CreateMock(libvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance_ref['name']:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        GB = 1024 * 1024 * 1024
        fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * GB
        fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * GB
        fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'

        self.mox.StubOutWithMock(os.path, "getsize")
        os.path.getsize('/test/disk').AndReturn((10737418240))
        os.path.getsize('/test/disk.local').AndReturn((3328599655))

        # Canned qemu-img output for the qcow2 disk: 20G virtual size,
        # 3.1G on disk.
        ret = ("image: /test/disk\n"
               "file format: raw\n"
               "virtual size: 20G (21474836480 bytes)\n"
               "disk size: 3.1G\n"
               "cluster_size: 2097152\n"
               "backing file: /test/dummy (actual path: /backing/file)\n")

        self.mox.StubOutWithMock(os.path, "exists")
        os.path.exists('/test/disk.local').AndReturn(True)

        self.mox.StubOutWithMock(utils, "execute")
        utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
                      '/test/disk.local').AndReturn((ret, ''))

        self.mox.ReplayAll()
        conn_info = {'driver_volume_type': 'fake'}
        # NOTE(review): `info` is first the block_device_info dict, then
        # rebound to the driver's JSON result below -- confusing reuse of
        # one name for two meanings.
        info = {'block_device_mapping': [
                  {'connection_info': conn_info, 'mount_device': '/dev/vdc'},
                  {'connection_info': conn_info, 'mount_device': '/dev/vdd'}]}
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        info = conn.get_instance_disk_info(instance_ref['name'],
                                           block_device_info=info)
        info = jsonutils.loads(info)
        self.assertEquals(info[0]['type'], 'raw')
        self.assertEquals(info[0]['path'], '/test/disk')
        self.assertEquals(info[0]['disk_size'], 10737418240)
        self.assertEquals(info[0]['backing_file'], "")
        self.assertEquals(info[0]['over_committed_disk_size'], 0)
        self.assertEquals(info[1]['type'], 'qcow2')
        self.assertEquals(info[1]['path'], '/test/disk.local')
        self.assertEquals(info[1]['virt_disk_size'], 21474836480)
        self.assertEquals(info[1]['backing_file'], "file")
        self.assertEquals(info[1]['over_committed_disk_size'], 18146236825)

        db.instance_destroy(self.context, instance_ref['uuid'])
    def test_spawn_with_network_info(self):
        # spawn() must succeed end-to-end with network info supplied, with
        # firewall and image-cache operations stubbed out.
        # Preparing mocks
        def fake_none(*args, **kwargs):
            return

        def fake_getLibVersion():
            return 9007

        def fake_getCapabilities():
            return """
            <capabilities>
                <host>
                    <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
                    <cpu>
                      <arch>x86_64</arch>
                      <model>Penryn</model>
                      <vendor>Intel</vendor>
                      <topology sockets='1' cores='2' threads='1'/>
                      <feature name='xtpr'/>
                    </cpu>
                </host>
            </capabilities>
            """

        # _fake_network_info must be called before create_fake_libvirt_mock(),
        # as _fake_network_info calls importutils.import_class() and
        # create_fake_libvirt_mock() mocks importutils.import_class().
        network_info = _fake_network_info(self.stubs, 1)
        self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
                                      getCapabilities=fake_getCapabilities)

        instance_ref = self.test_instance
        instance_ref['image_ref'] = 123456  # we send an int to test sha1 call
        instance_type = db.instance_type_get(self.context,
                                             instance_ref['instance_type_id'])
        sys_meta = instance_types.save_instance_type_info({}, instance_type)
        instance_ref['system_metadata'] = sys_meta
        instance = db.instance_create(self.context, instance_ref)

        # Mock out the get_info method of the LibvirtDriver so that the polling
        # in the spawn method of the LibvirtDriver returns immediately
        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, 'get_info')
        libvirt_driver.LibvirtDriver.get_info(instance
                                              ).AndReturn({'state': power_state.RUNNING})

        # Start test
        self.mox.ReplayAll()
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(conn.firewall_driver,
                       'setup_basic_filtering',
                       fake_none)
        self.stubs.Set(conn.firewall_driver,
                       'prepare_instance_filter',
                       fake_none)
        self.stubs.Set(imagebackend.Image,
                       'cache',
                       fake_none)

        conn.spawn(self.context, instance, None, [], 'herp',
                   network_info=network_info)

        # Clean up any directories spawn() created under instances_path.
        path = os.path.join(CONF.instances_path, instance['name'])
        if os.path.isdir(path):
            shutil.rmtree(path)

        path = os.path.join(CONF.instances_path, CONF.base_dir_name)
        if os.path.isdir(path):
            shutil.rmtree(os.path.join(CONF.instances_path,
                                       CONF.base_dir_name))
def test_spawn_without_image_meta(self):
self.create_image_called = False
def fake_none(*args, **kwargs):
return
def fake_create_image(*args, **kwargs):
self.create_image_called = True
def fake_get_info(instance):
return {'state': power_state.RUNNING}
instance_ref = self.test_instance
instance_ref['image_ref'] = 1
instance = db.instance_create(self.context, instance_ref)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, 'to_xml', fake_none)
self.stubs.Set(conn, '_create_image', fake_create_image)
self.stubs.Set(conn, '_create_domain_and_network', fake_none)
self.stubs.Set(conn, 'get_info', fake_get_info)
conn.spawn(self.context, instance, None, [], None)
self.assertTrue(self.create_image_called)
conn.spawn(self.context,
instance,
{'id': instance['image_ref']},
[],
None)
self.assertTrue(self.create_image_called)
    def test_spawn_from_volume_calls_cache(self):
        # The image cache must be skipped when booting from a volume
        # (with or without a placeholder image) and used when booting
        # from a real image.
        self.cache_called_for_disk = False

        def fake_none(*args, **kwargs):
            return

        def fake_cache(*args, **kwargs):
            if kwargs.get('image_id') == 'my_fake_image':
                self.cache_called_for_disk = True

        def fake_get_info(instance):
            return {'state': power_state.RUNNING}

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(conn, 'to_xml', fake_none)

        self.stubs.Set(imagebackend.Image, 'cache', fake_cache)
        self.stubs.Set(conn, '_create_domain_and_network', fake_none)
        self.stubs.Set(conn, 'get_info', fake_get_info)

        # Root device mapped to a volume -> no image cache needed.
        block_device_info = {'root_device_name': '/dev/vda',
                             'block_device_mapping': [
                                {'mount_device': 'vda'}]}

        # Volume-backed instance created without image
        instance_ref = self.test_instance
        instance_ref['image_ref'] = ''
        instance_ref['root_device_name'] = '/dev/vda'
        instance = db.instance_create(self.context, instance_ref)

        conn.spawn(self.context, instance, None, [], None,
                   block_device_info=block_device_info)
        self.assertFalse(self.cache_called_for_disk)
        db.instance_destroy(self.context, instance['uuid'])

        # Booted from volume but with placeholder image
        instance_ref = self.test_instance
        instance_ref['image_ref'] = 'my_fake_image'
        instance_ref['root_device_name'] = '/dev/vda'
        instance = db.instance_create(self.context, instance_ref)

        conn.spawn(self.context, instance, None, [], None,
                   block_device_info=block_device_info)
        self.assertFalse(self.cache_called_for_disk)
        db.instance_destroy(self.context, instance['uuid'])

        # Booted from an image
        instance_ref['image_ref'] = 'my_fake_image'
        instance = db.instance_create(self.context, instance_ref)
        conn.spawn(self.context, instance, None, [], None)
        self.assertTrue(self.cache_called_for_disk)
        db.instance_destroy(self.context, instance['uuid'])
def test_create_image_plain(self):
gotFiles = []
def fake_image(self, instance, name, image_type=''):
class FakeImage(imagebackend.Image):
def __init__(self, instance, name):
self.path = os.path.join(instance['name'], name)
def create_image(self, prepare_template, base,
size, *args, **kwargs):
pass
def cache(self, fetch_func, filename, size=None,
*args, **kwargs):
gotFiles.append({'filename': filename,
'size': size})
def snapshot(self, name):
pass
return FakeImage(instance, name)
def fake_none(*args, **kwargs):
return
def fake_get_info(instance):
return {'state': power_state.RUNNING}
# Stop 'libvirt_driver._create_image' touching filesystem
self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
fake_image)
instance_ref = self.test_instance
instance_ref['image_ref'] = 1
instance = db.instance_create(self.context, instance_ref)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, 'to_xml', fake_none)
self.stubs.Set(conn, '_create_domain_and_network', fake_none)
self.stubs.Set(conn, 'get_info', fake_get_info)
image_meta = {'id': instance['image_ref']}
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
None,
image_meta)
conn._create_image(context, instance,
disk_info['mapping'])
xml = conn.to_xml(instance, None,
disk_info, image_meta)
wantFiles = [
{'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
'size': 10 * 1024 * 1024 * 1024},
{'filename': 'ephemeral_20_default',
'size': 20 * 1024 * 1024 * 1024},
]
self.assertEquals(gotFiles, wantFiles)
def test_create_image_with_swap(self):
gotFiles = []
def fake_image(self, instance, name, image_type=''):
class FakeImage(imagebackend.Image):
def __init__(self, instance, name):
self.path = os.path.join(instance['name'], name)
def create_image(self, prepare_template, base,
size, *args, **kwargs):
pass
def cache(self, fetch_func, filename, size=None,
*args, **kwargs):
gotFiles.append({'filename': filename,
'size': size})
def snapshot(self, name):
pass
return FakeImage(instance, name)
def fake_none(*args, **kwargs):
return
def fake_get_info(instance):
return {'state': power_state.RUNNING}
# Stop 'libvirt_driver._create_image' touching filesystem
self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
fake_image)
instance_ref = self.test_instance
instance_ref['image_ref'] = 1
# Turn on some swap to exercise that codepath in _create_image
instance_ref['system_metadata']['instance_type_swap'] = 500
instance = db.instance_create(self.context, instance_ref)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, 'to_xml', fake_none)
self.stubs.Set(conn, '_create_domain_and_network', fake_none)
self.stubs.Set(conn, 'get_info', fake_get_info)
image_meta = {'id': instance['image_ref']}
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
None,
image_meta)
conn._create_image(context, instance,
disk_info['mapping'])
xml = conn.to_xml(instance, None,
disk_info, image_meta)
wantFiles = [
{'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
'size': 10 * 1024 * 1024 * 1024},
{'filename': 'ephemeral_20_default',
'size': 20 * 1024 * 1024 * 1024},
{'filename': 'swap_500',
'size': 500 * 1024 * 1024},
]
self.assertEquals(gotFiles, wantFiles)
    def test_get_console_output_file(self):
        """get_console_output on a file console returns only the last
        MAX_CONSOLE_BYTES of the log file.
        """
        fake_libvirt_utils.files['console.log'] = '01234567890'

        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)

            instance_ref = self.test_instance
            instance_ref['image_ref'] = 123456
            instance = db.instance_create(self.context, instance_ref)

            console_dir = (os.path.join(tmpdir, instance['name']))
            console_log = '%s/console.log' % (console_dir)

            # Domain XML advertising a file-backed console whose source
            # path points at the log above.
            fake_dom_xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                        <console type='file'>
                            <source path='%s'/>
                            <target port='0'/>
                        </console>
                    </devices>
                </domain>
            """ % console_log

            def fake_lookup(id):
                return FakeVirtDomain(fake_dom_xml)

            self.create_fake_libvirt_mock()
            libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup

            conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

            try:
                # Shrink the truncation limit so the 11-byte log gets
                # clipped; restore it no matter what happens.
                prev_max = libvirt_driver.MAX_CONSOLE_BYTES
                libvirt_driver.MAX_CONSOLE_BYTES = 5
                output = conn.get_console_output(instance)
            finally:
                libvirt_driver.MAX_CONSOLE_BYTES = prev_max

            # Only the final 5 bytes of '01234567890' survive.
            self.assertEquals('67890', output)
def test_get_console_output_pty(self):
fake_libvirt_utils.files['pty'] = '01234567890'
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
instance_ref = self.test_instance
instance_ref['image_ref'] = 123456
instance = db.instance_create(self.context, instance_ref)
console_dir = (os.path.join(tmpdir, instance['name']))
pty_file = '%s/fake_pty' % (console_dir)
fake_dom_xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
<console type='pty'>
<source path='%s'/>
<target port='0'/>
</console>
</devices>
</domain>
""" % pty_file
def fake_lookup(id):
return FakeVirtDomain(fake_dom_xml)
def _fake_flush(self, fake_pty):
return 'foo'
def _fake_append_to_file(self, data, fpath):
return 'pty'
self.create_fake_libvirt_mock()
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
libvirt_driver.LibvirtDriver._flush_libvirt_console = _fake_flush
libvirt_driver.LibvirtDriver._append_to_file = _fake_append_to_file
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
try:
prev_max = libvirt_driver.MAX_CONSOLE_BYTES
libvirt_driver.MAX_CONSOLE_BYTES = 5
output = conn.get_console_output(instance)
finally:
libvirt_driver.MAX_CONSOLE_BYTES = prev_max
self.assertEquals('67890', output)
def test_get_host_ip_addr(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
ip = conn.get_host_ip_addr()
self.assertEquals(ip, CONF.my_ip)
    def test_broken_connection(self):
        """A remote/RPC-layer libvirtError raised by getLibVersion makes
        _test_connection() report the connection as broken (False) for
        each of the error-code/error-domain combinations below.
        """
        for (error, domain) in (
                (libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_REMOTE),
                (libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_RPC),
                (libvirt.VIR_ERR_INTERNAL_ERROR, libvirt.VIR_FROM_RPC)):

            conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

            self.mox.StubOutWithMock(conn, "_wrapped_conn")
            self.mox.StubOutWithMock(conn._wrapped_conn, "getLibVersion")
            self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_code")
            self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_domain")

            # Record: the liveness probe raises, and the raised error
            # reports this iteration's code/domain pair.
            conn._wrapped_conn.getLibVersion().AndRaise(
                    libvirt.libvirtError("fake failure"))
            libvirt.libvirtError.get_error_code().AndReturn(error)
            libvirt.libvirtError.get_error_domain().AndReturn(domain)
            self.mox.ReplayAll()

            self.assertFalse(conn._test_connection())

            # Unstub so the next loop iteration can re-record cleanly.
            self.mox.UnsetStubs()
def test_immediate_delete(self):
def fake_lookup_by_name(instance_name):
raise exception.InstanceNotFound(instance_id=instance_name)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
instance = db.instance_create(self.context, self.test_instance)
conn.destroy(instance, {})
    def test_destroy_removes_disk(self):
        """destroy() (default destroy_disks) removes the instance
        directory via shutil.rmtree and runs the LVM cleanup.
        """
        instance = {"name": "instancename", "id": "instanceid",
                    "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}

        # Record the expected cleanup calls; ReplayAll verifies they
        # all happen when destroy() runs.
        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
                                 '_undefine_domain')
        libvirt_driver.LibvirtDriver._undefine_domain(instance)
        self.mox.StubOutWithMock(shutil, "rmtree")
        shutil.rmtree(os.path.join(CONF.instances_path, instance['name']))
        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_cleanup_lvm')
        libvirt_driver.LibvirtDriver._cleanup_lvm(instance)

        # Start test
        self.mox.ReplayAll()

        def fake_destroy(instance):
            pass

        def fake_os_path_exists(path):
            return True

        def fake_unplug_vifs(instance, network_info):
            pass

        def fake_unfilter_instance(instance, network_info):
            pass
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(conn, '_destroy', fake_destroy)
        self.stubs.Set(conn, 'unplug_vifs', fake_unplug_vifs)
        self.stubs.Set(conn.firewall_driver,
                       'unfilter_instance', fake_unfilter_instance)
        self.stubs.Set(os.path, 'exists', fake_os_path_exists)
        conn.destroy(instance, [])
    def test_destroy_not_removes_disk(self):
        """destroy() with destroy_disks=False undefines the domain but
        never touches shutil.rmtree or LVM cleanup (no expectations are
        recorded for them, so mox would fail if they were called).
        """
        instance = {"name": "instancename", "id": "instanceid",
                    "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}

        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
                                 '_undefine_domain')
        libvirt_driver.LibvirtDriver._undefine_domain(instance)

        # Start test
        self.mox.ReplayAll()

        def fake_destroy(instance):
            pass

        def fake_os_path_exists(path):
            return True

        def fake_unplug_vifs(instance, network_info):
            pass

        def fake_unfilter_instance(instance, network_info):
            pass
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(conn, '_destroy', fake_destroy)
        self.stubs.Set(conn, 'unplug_vifs', fake_unplug_vifs)
        self.stubs.Set(conn.firewall_driver,
                       'unfilter_instance', fake_unfilter_instance)
        self.stubs.Set(os.path, 'exists', fake_os_path_exists)
        # Positional args: (instance, network_info, block_device_info,
        # destroy_disks=False).
        conn.destroy(instance, [], None, False)
    def test_destroy_undefines(self):
        """destroy() undefines the domain via undefineFlags(1) when the
        libvirt binding supports it.
        """
        mock = self.mox.CreateMock(libvirt.virDomain)
        mock.ID()
        mock.destroy()
        mock.undefineFlags(1).AndReturn(1)

        self.mox.ReplayAll()

        def fake_lookup_by_name(instance_name):
            return mock

        def fake_get_info(instance_name):
            # Report the domain as already shut down.
            return {'state': power_state.SHUTDOWN, 'id': -1}

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
        self.stubs.Set(conn, 'get_info', fake_get_info)
        instance = {"name": "instancename", "id": "instanceid",
                    "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
        conn.destroy(instance, [])
    def test_destroy_undefines_no_undefine_flags(self):
        """destroy() falls back to plain undefine() when
        undefineFlags(1) raises a libvirtError.
        """
        mock = self.mox.CreateMock(libvirt.virDomain)
        mock.ID()
        mock.destroy()
        mock.undefineFlags(1).AndRaise(libvirt.libvirtError('Err'))
        mock.undefine()

        self.mox.ReplayAll()

        def fake_lookup_by_name(instance_name):
            return mock

        def fake_get_info(instance_name):
            # Report the domain as already shut down.
            return {'state': power_state.SHUTDOWN, 'id': -1}

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
        self.stubs.Set(conn, 'get_info', fake_get_info)
        instance = {"name": "instancename", "id": "instanceid",
                    "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
        conn.destroy(instance, [])
    def test_destroy_undefines_no_attribute_with_managed_save(self):
        """On bindings without undefineFlags (AttributeError), destroy()
        removes an existing managed-save image before undefining.
        """
        mock = self.mox.CreateMock(libvirt.virDomain)
        mock.ID()
        mock.destroy()
        mock.undefineFlags(1).AndRaise(AttributeError())
        # A managed-save image exists, so it must be removed first.
        mock.hasManagedSaveImage(0).AndReturn(True)
        mock.managedSaveRemove(0)
        mock.undefine()

        self.mox.ReplayAll()

        def fake_lookup_by_name(instance_name):
            return mock

        def fake_get_info(instance_name):
            return {'state': power_state.SHUTDOWN, 'id': -1}

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
        self.stubs.Set(conn, 'get_info', fake_get_info)
        instance = {"name": "instancename", "id": "instanceid",
                    "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
        conn.destroy(instance, [])
    def test_destroy_undefines_no_attribute_no_managed_save(self):
        """On bindings where both undefineFlags and hasManagedSaveImage
        are missing (AttributeError), destroy() still does a plain
        undefine().
        """
        mock = self.mox.CreateMock(libvirt.virDomain)
        mock.ID()
        mock.destroy()
        mock.undefineFlags(1).AndRaise(AttributeError())
        mock.hasManagedSaveImage(0).AndRaise(AttributeError())
        mock.undefine()

        self.mox.ReplayAll()

        def fake_lookup_by_name(instance_name):
            return mock

        def fake_get_info(instance_name):
            return {'state': power_state.SHUTDOWN, 'id': -1}

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
        self.stubs.Set(conn, 'get_info', fake_get_info)
        instance = {"name": "instancename", "id": "instanceid",
                    "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
        conn.destroy(instance, [])
    def test_destroy_timed_out(self):
        """A libvirtError with code VIR_ERR_OPERATION_TIMEOUT from
        domain.destroy() is surfaced as InstancePowerOffFailure.
        """
        mock = self.mox.CreateMock(libvirt.virDomain)
        mock.ID()
        mock.destroy().AndRaise(libvirt.libvirtError("timed out"))
        self.mox.ReplayAll()

        def fake_lookup_by_name(instance_name):
            return mock

        def fake_get_error_code(self):
            return libvirt.VIR_ERR_OPERATION_TIMEOUT

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
        self.stubs.Set(libvirt.libvirtError, 'get_error_code',
                       fake_get_error_code)
        instance = {"name": "instancename", "id": "instanceid",
                    "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
        self.assertRaises(exception.InstancePowerOffFailure,
                          conn.destroy, instance, [])
    def test_private_destroy_not_found(self):
        """_destroy() tolerates the instance disappearing between the
        destroy call and the follow-up get_info check.
        """
        mock = self.mox.CreateMock(libvirt.virDomain)
        mock.ID()
        mock.destroy()
        self.mox.ReplayAll()

        def fake_lookup_by_name(instance_name):
            return mock

        def fake_get_info(instance_name):
            # Simulate the domain vanishing right after destroy().
            raise exception.InstanceNotFound(instance_id=instance_name)

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
        self.stubs.Set(conn, 'get_info', fake_get_info)
        instance = {"name": "instancename", "id": "instanceid",
                    "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
        # NOTE(vish): verifies destroy doesn't raise if the instance disappears
        conn._destroy(instance)
    def test_disk_over_committed_size_total(self):
        """get_disk_over_committed_size_total() sums the
        over_committed_disk_size of every disk of every instance
        reported by get_instance_disk_info.
        """
        # (The previous inline comment here, about managedSaveRemove,
        # was a copy-paste leftover from the destroy tests.)
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        def list_instances():
            return ['fake1', 'fake2']
        self.stubs.Set(conn, 'list_instances', list_instances)

        # fake1 over-commits ~10G; fake2 (raw) over-commits nothing.
        fake_disks = {'fake1': [{'type': 'qcow2', 'path': '/somepath/disk1',
                                 'virt_disk_size': '10737418240',
                                 'backing_file': '/somepath/disk1',
                                 'disk_size':'83886080',
                                 'over_committed_disk_size':'10653532160'}],
                      'fake2': [{'type': 'raw', 'path': '/somepath/disk2',
                                 'virt_disk_size': '0',
                                 'backing_file': '/somepath/disk2',
                                 'disk_size':'10737418240',
                                 'over_committed_disk_size':'0'}]}

        def get_info(instance_name):
            return jsonutils.dumps(fake_disks.get(instance_name))
        self.stubs.Set(conn, 'get_instance_disk_info', get_info)
        result = conn.get_disk_over_committed_size_total()
        self.assertEqual(result, 10653532160)
    def test_cpu_info(self):
        """get_cpu_info() serializes the host CPU capabilities (vendor,
        model, arch, features, topology) to JSON.
        """
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        def get_host_capabilities_stub(self):
            # Build a canned capabilities object: an AMD Opteron_G4
            # host with two extra CPU features and kvm guests for
            # x86_64 and i686.
            cpu = vconfig.LibvirtConfigCPU()
            cpu.model = "Opteron_G4"
            cpu.vendor = "AMD"
            cpu.arch = "x86_64"

            cpu.cores = 2
            cpu.threads = 1
            cpu.sockets = 4

            cpu.add_feature(vconfig.LibvirtConfigCPUFeature("extapic"))
            cpu.add_feature(vconfig.LibvirtConfigCPUFeature("3dnow"))

            caps = vconfig.LibvirtConfigCaps()
            caps.host = vconfig.LibvirtConfigCapsHost()
            caps.host.cpu = cpu

            guest = vconfig.LibvirtConfigGuest()
            guest.ostype = vm_mode.HVM
            guest.arch = "x86_64"
            guest.domtype = ["kvm"]
            caps.guests.append(guest)

            guest = vconfig.LibvirtConfigGuest()
            guest.ostype = vm_mode.HVM
            guest.arch = "i686"
            guest.domtype = ["kvm"]
            caps.guests.append(guest)

            return caps

        self.stubs.Set(libvirt_driver.LibvirtDriver,
                       'get_host_capabilities',
                       get_host_capabilities_stub)

        want = {"vendor": "AMD",
                "features": ["extapic", "3dnow"],
                "model": "Opteron_G4",
                "arch": "x86_64",
                "topology": {"cores": 2, "threads": 1, "sockets": 4}}
        got = jsonutils.loads(conn.get_cpu_info())
        self.assertEqual(want, got)
    def test_diagnostic_vcpus_exception(self):
        """get_diagnostics() still returns disk, interface and memory
        stats when the domain's vcpus() call raises: the cpuN_time keys
        are simply absent from the result.
        """
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """

        class DiagFakeDomain(FakeVirtDomain):

            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)

            def vcpus(self):
                # The failure under test.
                raise libvirt.libvirtError('vcpus missing')

            def blockStats(self, path):
                return (169L, 688640L, 0L, 0L, -1L)

            def interfaceStats(self, path):
                return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)

            def memoryStats(self):
                return {'actual': 220160L, 'rss': 200164L}

            def maxMemory(self):
                return 280160L

        def fake_lookup_name(name):
            return DiagFakeDomain()

        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
        libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        actual = conn.get_diagnostics({"name": "testvirt"})
        # No cpuN_time keys; everything else is present.
        expect = {'vda_read': 688640L,
                  'vda_read_req': 169L,
                  'vda_write': 0L,
                  'vda_write_req': 0L,
                  'vda_errors': -1L,
                  'vdb_read': 688640L,
                  'vdb_read_req': 169L,
                  'vdb_write': 0L,
                  'vdb_write_req': 0L,
                  'vdb_errors': -1L,
                  'memory': 280160L,
                  'memory-actual': 220160L,
                  'memory-rss': 200164L,
                  'vnet0_rx': 4408L,
                  'vnet0_rx_drop': 0L,
                  'vnet0_rx_errors': 0L,
                  'vnet0_rx_packets': 82L,
                  'vnet0_tx': 0L,
                  'vnet0_tx_drop': 0L,
                  'vnet0_tx_errors': 0L,
                  'vnet0_tx_packets': 0L,
                  }
        self.assertEqual(actual, expect)
    def test_diagnostic_blockstats_exception(self):
        """get_diagnostics() still returns cpu, interface and memory
        stats when blockStats() raises: the per-disk keys are simply
        absent from the result.
        """
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """

        class DiagFakeDomain(FakeVirtDomain):

            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)

            def vcpus(self):
                return ([(0, 1, 15340000000L, 0),
                         (1, 1, 1640000000L, 0),
                         (2, 1, 3040000000L, 0),
                         (3, 1, 1420000000L, 0)],
                        [(True, False),
                         (True, False),
                         (True, False),
                         (True, False)])

            def blockStats(self, path):
                # The failure under test.
                raise libvirt.libvirtError('blockStats missing')

            def interfaceStats(self, path):
                return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)

            def memoryStats(self):
                return {'actual': 220160L, 'rss': 200164L}

            def maxMemory(self):
                return 280160L

        def fake_lookup_name(name):
            return DiagFakeDomain()

        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
        libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        actual = conn.get_diagnostics({"name": "testvirt"})
        # No vda_*/vdb_* keys; everything else is present.
        expect = {'cpu0_time': 15340000000L,
                  'cpu1_time': 1640000000L,
                  'cpu2_time': 3040000000L,
                  'cpu3_time': 1420000000L,
                  'memory': 280160L,
                  'memory-actual': 220160L,
                  'memory-rss': 200164L,
                  'vnet0_rx': 4408L,
                  'vnet0_rx_drop': 0L,
                  'vnet0_rx_errors': 0L,
                  'vnet0_rx_packets': 82L,
                  'vnet0_tx': 0L,
                  'vnet0_tx_drop': 0L,
                  'vnet0_tx_errors': 0L,
                  'vnet0_tx_packets': 0L,
                  }
        self.assertEqual(actual, expect)
    def test_diagnostic_interfacestats_exception(self):
        """get_diagnostics() still returns cpu, disk and memory stats
        when interfaceStats() raises: the vnet0_* keys are simply
        absent from the result.
        """
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """

        class DiagFakeDomain(FakeVirtDomain):

            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)

            def vcpus(self):
                return ([(0, 1, 15340000000L, 0),
                         (1, 1, 1640000000L, 0),
                         (2, 1, 3040000000L, 0),
                         (3, 1, 1420000000L, 0)],
                        [(True, False),
                         (True, False),
                         (True, False),
                         (True, False)])

            def blockStats(self, path):
                return (169L, 688640L, 0L, 0L, -1L)

            def interfaceStats(self, path):
                # The failure under test.
                raise libvirt.libvirtError('interfaceStat missing')

            def memoryStats(self):
                return {'actual': 220160L, 'rss': 200164L}

            def maxMemory(self):
                return 280160L

        def fake_lookup_name(name):
            return DiagFakeDomain()

        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
        libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        actual = conn.get_diagnostics({"name": "testvirt"})
        # No vnet0_* keys; everything else is present.
        expect = {'cpu0_time': 15340000000L,
                  'cpu1_time': 1640000000L,
                  'cpu2_time': 3040000000L,
                  'cpu3_time': 1420000000L,
                  'vda_read': 688640L,
                  'vda_read_req': 169L,
                  'vda_write': 0L,
                  'vda_write_req': 0L,
                  'vda_errors': -1L,
                  'vdb_read': 688640L,
                  'vdb_read_req': 169L,
                  'vdb_write': 0L,
                  'vdb_write_req': 0L,
                  'vdb_errors': -1L,
                  'memory': 280160L,
                  'memory-actual': 220160L,
                  'memory-rss': 200164L,
                  }
        self.assertEqual(actual, expect)
    def test_diagnostic_memorystats_exception(self):
        """get_diagnostics() still returns cpu, disk, interface and max
        memory stats when memoryStats() raises: only memory-actual and
        memory-rss are absent from the result.
        """
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """

        class DiagFakeDomain(FakeVirtDomain):

            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)

            def vcpus(self):
                return ([(0, 1, 15340000000L, 0),
                         (1, 1, 1640000000L, 0),
                         (2, 1, 3040000000L, 0),
                         (3, 1, 1420000000L, 0)],
                        [(True, False),
                         (True, False),
                         (True, False),
                         (True, False)])

            def blockStats(self, path):
                return (169L, 688640L, 0L, 0L, -1L)

            def interfaceStats(self, path):
                return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)

            def memoryStats(self):
                # The failure under test.
                raise libvirt.libvirtError('memoryStats missing')

            def maxMemory(self):
                return 280160L

        def fake_lookup_name(name):
            return DiagFakeDomain()

        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
        libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        actual = conn.get_diagnostics({"name": "testvirt"})
        # 'memory' (maxMemory) survives; memory-actual/rss do not.
        expect = {'cpu0_time': 15340000000L,
                  'cpu1_time': 1640000000L,
                  'cpu2_time': 3040000000L,
                  'cpu3_time': 1420000000L,
                  'vda_read': 688640L,
                  'vda_read_req': 169L,
                  'vda_write': 0L,
                  'vda_write_req': 0L,
                  'vda_errors': -1L,
                  'vdb_read': 688640L,
                  'vdb_read_req': 169L,
                  'vdb_write': 0L,
                  'vdb_write_req': 0L,
                  'vdb_errors': -1L,
                  'memory': 280160L,
                  'vnet0_rx': 4408L,
                  'vnet0_rx_drop': 0L,
                  'vnet0_rx_errors': 0L,
                  'vnet0_rx_packets': 82L,
                  'vnet0_tx': 0L,
                  'vnet0_tx_drop': 0L,
                  'vnet0_tx_errors': 0L,
                  'vnet0_tx_packets': 0L,
                  }
        self.assertEqual(actual, expect)
    def test_diagnostic_full(self):
        """When every domain stats call succeeds, get_diagnostics()
        returns the complete cpu/disk/interface/memory dictionary.
        """
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """

        class DiagFakeDomain(FakeVirtDomain):

            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)

            def vcpus(self):
                return ([(0, 1, 15340000000L, 0),
                         (1, 1, 1640000000L, 0),
                         (2, 1, 3040000000L, 0),
                         (3, 1, 1420000000L, 0)],
                        [(True, False),
                         (True, False),
                         (True, False),
                         (True, False)])

            def blockStats(self, path):
                return (169L, 688640L, 0L, 0L, -1L)

            def interfaceStats(self, path):
                return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)

            def memoryStats(self):
                return {'actual': 220160L, 'rss': 200164L}

            def maxMemory(self):
                return 280160L

        def fake_lookup_name(name):
            return DiagFakeDomain()

        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
        libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        actual = conn.get_diagnostics({"name": "testvirt"})
        expect = {'cpu0_time': 15340000000L,
                  'cpu1_time': 1640000000L,
                  'cpu2_time': 3040000000L,
                  'cpu3_time': 1420000000L,
                  'vda_read': 688640L,
                  'vda_read_req': 169L,
                  'vda_write': 0L,
                  'vda_write_req': 0L,
                  'vda_errors': -1L,
                  'vdb_read': 688640L,
                  'vdb_read_req': 169L,
                  'vdb_write': 0L,
                  'vdb_write_req': 0L,
                  'vdb_errors': -1L,
                  'memory': 280160L,
                  'memory-actual': 220160L,
                  'memory-rss': 200164L,
                  'vnet0_rx': 4408L,
                  'vnet0_rx_drop': 0L,
                  'vnet0_rx_errors': 0L,
                  'vnet0_rx_packets': 82L,
                  'vnet0_tx': 0L,
                  'vnet0_tx_drop': 0L,
                  'vnet0_tx_errors': 0L,
                  'vnet0_tx_packets': 0L,
                  }
        self.assertEqual(actual, expect)
    def test_failing_vcpu_count(self):
        """Domain can fail to return the vcpu description in case it's
        just starting up or shutting down. Make sure None is handled
        gracefully.
        """

        class DiagFakeDomain(object):
            def __init__(self, vcpus):
                self._vcpus = vcpus

            def vcpus(self):
                if self._vcpus is None:
                    return None
                else:
                    return ([1] * self._vcpus, [True] * self._vcpus)

        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        conn = driver._conn
        self.mox.StubOutWithMock(driver, 'list_instance_ids')
        self.mox.StubOutWithMock(conn, 'lookupByID')

        # Domain 1 reports no vcpu info (startup/shutdown window);
        # only domain 2's five vcpus must be counted.
        driver.list_instance_ids().AndReturn([1, 2])
        conn.lookupByID(1).AndReturn(DiagFakeDomain(None))
        conn.lookupByID(2).AndReturn(DiagFakeDomain(5))

        self.mox.ReplayAll()

        self.assertEqual(5, driver.get_vcpu_used())
def test_get_instance_capabilities(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
def get_host_capabilities_stub(self):
caps = vconfig.LibvirtConfigCaps()
guest = vconfig.LibvirtConfigGuest()
guest.ostype = 'hvm'
guest.arch = 'x86_64'
guest.domtype = ['kvm', 'qemu']
caps.guests.append(guest)
guest = vconfig.LibvirtConfigGuest()
guest.ostype = 'hvm'
guest.arch = 'i686'
guest.domtype = ['kvm']
caps.guests.append(guest)
return caps
self.stubs.Set(libvirt_driver.LibvirtDriver,
'get_host_capabilities',
get_host_capabilities_stub)
want = [('x86_64', 'kvm', 'hvm'),
('x86_64', 'qemu', 'hvm'),
('i686', 'kvm', 'hvm')]
got = conn.get_instance_capabilities()
self.assertEqual(want, got)
def test_event_dispatch(self):
# Validate that the libvirt self-pipe for forwarding
# events between threads is working sanely
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
got_events = []
def handler(event):
got_events.append(event)
conn.register_event_listener(handler)
conn._init_events_pipe()
event1 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_STARTED)
event2 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_PAUSED)
conn._queue_event(event1)
conn._queue_event(event2)
conn._dispatch_events()
want_events = [event1, event2]
self.assertEqual(want_events, got_events)
event3 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_RESUMED)
event4 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_STOPPED)
conn._queue_event(event3)
conn._queue_event(event4)
conn._dispatch_events()
want_events = [event1, event2, event3, event4]
self.assertEqual(want_events, got_events)
    def test_event_lifecycle(self):
        """A libvirt VIR_DOMAIN_EVENT_STOPPED callback is translated
        into exactly one Nova LifecycleEvent carrying the domain UUID
        and EVENT_LIFECYCLE_STOPPED.
        """
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        got_events = []

        def handler(event):
            got_events.append(event)

        conn.register_event_listener(handler)
        conn._init_events_pipe()
        fake_dom_xml = """
                <domain type='kvm'>
                    <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                    </devices>
                </domain>
            """
        dom = FakeVirtDomain(fake_dom_xml,
                             "cef19ce0-0ca2-11df-855d-b19fbce37686")

        # Fire the callback directly, then pump the self-pipe.
        conn._event_lifecycle_callback(conn._conn,
                                       dom,
                                       libvirt.VIR_DOMAIN_EVENT_STOPPED,
                                       0,
                                       conn)
        conn._dispatch_events()
        self.assertEqual(len(got_events), 1)
        self.assertEqual(type(got_events[0]), virtevent.LifecycleEvent)
        self.assertEqual(got_events[0].uuid,
                         "cef19ce0-0ca2-11df-855d-b19fbce37686")
        self.assertEqual(got_events[0].transition,
                         virtevent.EVENT_LIFECYCLE_STOPPED)
def test_set_cache_mode(self):
self.flags(disk_cachemodes=['file=directsync'])
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
fake_conf = FakeConfigGuestDisk()
fake_conf.source_type = 'file'
conn.set_cache_mode(fake_conf)
self.assertEqual(fake_conf.driver_cache, 'directsync')
def test_set_cache_mode_invalid_mode(self):
self.flags(disk_cachemodes=['file=FAKE'])
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
fake_conf = FakeConfigGuestDisk()
fake_conf.source_type = 'file'
conn.set_cache_mode(fake_conf)
self.assertEqual(fake_conf.driver_cache, None)
def test_set_cache_mode_invalid_object(self):
self.flags(disk_cachemodes=['file=directsync'])
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
fake_conf = FakeConfigGuest()
fake_conf.driver_cache = 'fake'
conn.set_cache_mode(fake_conf)
self.assertEqual(fake_conf.driver_cache, 'fake')
    def _test_shared_storage_detection(self, is_same):
        """Drive _is_storage_shared_with against a host whose IP differs
        from ours: a file touched over ssh that appears locally means
        shared storage (then deleted locally); otherwise it is deleted
        over ssh. Returns the detection result.

        :param is_same: whether the local existence check should see
                        the remotely-touched file.
        """
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.mox.StubOutWithMock(conn, 'get_host_ip_addr')
        self.mox.StubOutWithMock(utils, 'execute')
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(os, 'unlink')
        # 'bar' != 'foo', so the ssh probe path is taken.
        conn.get_host_ip_addr().AndReturn('bar')
        utils.execute('ssh', 'foo', 'touch', mox.IgnoreArg())
        os.path.exists(mox.IgnoreArg()).AndReturn(is_same)
        if is_same:
            os.unlink(mox.IgnoreArg())
        else:
            utils.execute('ssh', 'foo', 'rm', mox.IgnoreArg())
        self.mox.ReplayAll()
        return conn._is_storage_shared_with('foo', '/path')
def test_shared_storage_detection_same_host(self):
self.assertTrue(self._test_shared_storage_detection(True))
def test_shared_storage_detection_different_host(self):
self.assertFalse(self._test_shared_storage_detection(False))
    def test_shared_storage_detection_easy(self):
        """When the target host IP equals our own, storage is trivially
        shared and no ssh probe is attempted (execute/exists/unlink are
        stubbed but given no expectations).
        """
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.mox.StubOutWithMock(conn, 'get_host_ip_addr')
        self.mox.StubOutWithMock(utils, 'execute')
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(os, 'unlink')
        conn.get_host_ip_addr().AndReturn('foo')
        self.mox.ReplayAll()
        self.assertTrue(conn._is_storage_shared_with('foo', '/path'))
class HostStateTestCase(test.TestCase):
    """Exercise libvirt_driver.HostState against a canned connection."""

    # Canned JSON the fake connection returns for get_cpu_info().
    cpu_info = ('{"vendor": "Intel", "model": "pentium", "arch": "i686", '
                 '"features": ["ssse3", "monitor", "pni", "sse2", "sse", '
                 '"fxsr", "clflush", "pse36", "pat", "cmov", "mca", "pge", '
                 '"mtrr", "sep", "apic"], '
                 '"topology": {"cores": "1", "threads": "1", "sockets": "1"}}')
    instance_caps = [("x86_64", "kvm", "hvm"), ("i686", "kvm", "hvm")]

    class FakeConnection(object):
        """Fake connection object."""

        def get_vcpu_total(self):
            return 1

        def get_vcpu_used(self):
            return 0

        def get_cpu_info(self):
            return HostStateTestCase.cpu_info

        def get_local_gb_info(self):
            return {'total': 100, 'used': 20, 'free': 80}

        def get_memory_mb_total(self):
            return 497

        def get_memory_mb_used(self):
            return 88

        def get_hypervisor_type(self):
            return 'QEMU'

        def get_hypervisor_version(self):
            return 13091

        def get_hypervisor_hostname(self):
            return 'compute1'

        def get_host_uptime(self):
            return ('10:01:16 up  1:36,  6 users,  '
                    'load average: 0.21, 0.16, 0.19')

        def get_disk_available_least(self):
            return 13091

        def get_instance_capabilities(self):
            return HostStateTestCase.instance_caps

    def test_update_status(self):
        """HostState constructor populates _stats straight from the
        connection's getters (host_memory_free = total - used).
        """
        hs = libvirt_driver.HostState(self.FakeConnection())
        stats = hs._stats
        self.assertEquals(stats["vcpus"], 1)
        self.assertEquals(stats["vcpus_used"], 0)
        self.assertEquals(stats["cpu_info"],
                {"vendor": "Intel", "model": "pentium", "arch": "i686",
                 "features": ["ssse3", "monitor", "pni", "sse2", "sse",
                              "fxsr", "clflush", "pse36", "pat", "cmov",
                              "mca", "pge", "mtrr", "sep", "apic"],
                 "topology": {"cores": "1", "threads": "1", "sockets": "1"}
                })
        self.assertEquals(stats["disk_total"], 100)
        self.assertEquals(stats["disk_used"], 20)
        self.assertEquals(stats["disk_available"], 80)
        self.assertEquals(stats["host_memory_total"], 497)
        self.assertEquals(stats["host_memory_free"], 409)
        self.assertEquals(stats["hypervisor_type"], 'QEMU')
        self.assertEquals(stats["hypervisor_version"], 13091)
        self.assertEquals(stats["hypervisor_hostname"], 'compute1')
class NWFilterFakes:
    """Minimal in-memory stand-in for libvirt's nwfilter API."""

    def __init__(self):
        # name -> FakeNWFilterInternal
        self.filters = {}

    def nwfilterLookupByName(self, name):
        """Return a previously defined filter, raising like libvirt
        when the name is unknown.
        """
        if name in self.filters:
            return self.filters[name]
        raise libvirt.libvirtError('Filter Not Found')

    def filterDefineXMLMock(self, xml):
        """Register a filter parsed from *xml*; redefining an existing
        name is a no-op. Always returns True.
        """
        class FakeNWFilterInternal:
            def __init__(self, parent, name, xml):
                self.name = name
                self.parent = parent
                self.xml = xml

            def undefine(self):
                # Deregistering from the parent is the only side effect
                # the tests rely on.  (Removed a dead 'pass' statement
                # that followed this del.)
                del self.parent.filters[self.name]
        tree = etree.fromstring(xml)
        name = tree.get('name')
        if name not in self.filters:
            self.filters[name] = FakeNWFilterInternal(self, name, xml)
        return True
class IptablesFirewallTestCase(test.TestCase):
    def setUp(self):
        """Wire an IptablesFirewallDriver to a do-nothing libvirt
        connection so filter setup never talks to a real hypervisor.
        """
        super(IptablesFirewallTestCase, self).setUp()

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)

        class FakeLibvirtDriver(object):
            def nwfilterDefineXML(*args, **kwargs):
                """setup_basic_rules in nwfilter calls this."""
                pass
        self.fake_libvirt_connection = FakeLibvirtDriver()
        self.fw = firewall.IptablesFirewallDriver(
            fake.FakeVirtAPI(),
            get_connection=lambda: self.fake_libvirt_connection)
in_rules = [
'# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
'*nat',
':PREROUTING ACCEPT [1170:189210]',
':INPUT ACCEPT [844:71028]',
':OUTPUT ACCEPT [5149:405186]',
':POSTROUTING ACCEPT [5063:386098]',
'# Completed on Tue Dec 18 15:50:25 2012',
'# Generated by iptables-save v1.4.12 on Tue Dec 18 15:50:25 201;',
'*mangle',
':PREROUTING ACCEPT [241:39722]',
':INPUT ACCEPT [230:39282]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [266:26558]',
':POSTROUTING ACCEPT [267:26590]',
'-A POSTROUTING -o virbr0 -p udp -m udp --dport 68 -j CHECKSUM '
'--checksum-fill',
'COMMIT',
'# Completed on Tue Dec 18 15:50:25 2012',
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*filter',
':INPUT ACCEPT [969615:281627771]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [915599:63811649]',
':nova-block-ipv4 - [0:0]',
'[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
'[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
',ESTABLISHED -j ACCEPT ',
'[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -o virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'[0:0] -A FORWARD -i virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'COMMIT',
'# Completed on Mon Dec 6 11:54:13 2010',
]
in6_filter_rules = [
'# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
'*filter',
':INPUT ACCEPT [349155:75810423]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [349256:75777230]',
'COMMIT',
'# Completed on Tue Jan 18 23:47:56 2011',
]
def _create_instance_ref(self):
return db.instance_create(self.context,
{'user_id': 'fake',
'project_id': 'fake',
'instance_type_id': 1})
    def test_static_filters(self):
        """End-to-end check of the iptables firewall driver.

        Creates two security groups with icmp/tcp rules (CIDR-based and
        source-group-based), attaches them to two instances, stubs the
        iptables save/restore commands to capture the generated rules,
        and asserts the expected ACCEPT rules appear in the output.
        """
        instance_ref = self._create_instance_ref()
        src_instance_ref = self._create_instance_ref()
        admin_ctxt = context.get_admin_context()
        secgroup = db.security_group_create(admin_ctxt,
                                            {'user_id': 'fake',
                                             'project_id': 'fake',
                                             'name': 'testgroup',
                                             'description': 'test group'})
        src_secgroup = db.security_group_create(admin_ctxt,
                                                {'user_id': 'fake',
                                                 'project_id': 'fake',
                                                 'name': 'testsourcegroup',
                                                 'description': 'src group'})
        # icmp from a CIDR, any type/code
        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'icmp',
                                       'from_port': -1,
                                       'to_port': -1,
                                       'cidr': '192.168.11.0/24'})
        # icmp echo-request (type 8) from a CIDR
        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'icmp',
                                       'from_port': 8,
                                       'to_port': -1,
                                       'cidr': '192.168.11.0/24'})
        # tcp 80-81 from a CIDR
        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'tcp',
                                       'from_port': 80,
                                       'to_port': 81,
                                       'cidr': '192.168.10.0/24'})
        # tcp 80-81 from the source security group
        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'tcp',
                                       'from_port': 80,
                                       'to_port': 81,
                                       'group_id': src_secgroup['id']})
        # protocol/port-less rule from the source security group
        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'group_id': src_secgroup['id']})
        db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
                                       secgroup['id'])
        db.instance_add_security_group(admin_ctxt, src_instance_ref['uuid'],
                                       src_secgroup['id'])
        instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
        src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id'])
        # self.fw.add_instance(instance_ref)
        def fake_iptables_execute(*cmd, **kwargs):
            # Feed canned save output in; capture restore input into
            # self.out_rules / self.out6_rules for the assertions below.
            process_input = kwargs.get('process_input', None)
            if cmd == ('ip6tables-save', '-c'):
                return '\n'.join(self.in6_filter_rules), None
            if cmd == ('iptables-save', '-c'):
                return '\n'.join(self.in_rules), None
            if cmd == ('iptables-restore', '-c'):
                lines = process_input.split('\n')
                if '*filter' in lines:
                    self.out_rules = lines
                return '', ''
            if cmd == ('ip6tables-restore', '-c',):
                lines = process_input.split('\n')
                if '*filter' in lines:
                    self.out6_rules = lines
                return '', ''
        network_model = _fake_network_info(self.stubs, 1, spectacular=True)
        from nova.network import linux_net
        linux_net.iptables_manager.execute = fake_iptables_execute
        from nova.compute import utils as compute_utils
        self.stubs.Set(compute_utils, 'get_nw_info_for_instance',
                       lambda instance: network_model)
        network_info = network_model.legacy()
        self.fw.prepare_instance_filter(instance_ref, network_info)
        self.fw.apply_instance_filter(instance_ref, network_info)
        # Pre-existing (non-nova) rules must survive the rewrite untouched.
        in_rules = filter(lambda l: not l.startswith('#'),
                          self.in_rules)
        for rule in in_rules:
            if 'nova' not in rule:
                self.assertTrue(rule in self.out_rules,
                                'Rule went missing: %s' % rule)
        instance_chain = None
        for rule in self.out_rules:
            # This is pretty crude, but it'll do for now
            # last two octets change
            if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
                instance_chain = rule.split(' ')[-1]
                break
        self.assertTrue(instance_chain, "The instance chain wasn't added")
        security_group_chain = None
        for rule in self.out_rules:
            # This is pretty crude, but it'll do for now
            if '-A %s -j' % instance_chain in rule:
                security_group_chain = rule.split(' ')[-1]
                break
        self.assertTrue(security_group_chain,
                        "The security group chain wasn't added")
        regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp '
                           '-s 192.168.11.0/24')
        self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
                        "ICMP acceptance rule wasn't added")
        regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp '
                           '--icmp-type 8 -s 192.168.11.0/24')
        self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
                        "ICMP Echo Request acceptance rule wasn't added")
        # Group-sourced rules expand to one rule per fixed IPv4 address of
        # the source instance.
        for ip in network_model.fixed_ips():
            if ip['version'] != 4:
                continue
            regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp -m multiport '
                               '--dports 80:81 -s %s' % ip['address'])
            self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
                            "TCP port 80/81 acceptance rule wasn't added")
            regex = re.compile('\[0\:0\] -A .* -j ACCEPT -s '
                               '%s' % ip['address'])
            self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
                            "Protocol/port-less acceptance rule wasn't added")
        regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp '
                           '-m multiport --dports 80:81 -s 192.168.10.0/24')
        self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
                        "TCP port 80/81 acceptance rule wasn't added")
        db.instance_destroy(admin_ctxt, instance_ref['uuid'])
def test_filters_for_instance_with_ip_v6(self):
self.flags(use_ipv6=True)
network_info = _fake_network_info(self.stubs, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEquals(len(rulesv4), 2)
self.assertEquals(len(rulesv6), 1)
def test_filters_for_instance_without_ip_v6(self):
self.flags(use_ipv6=False)
network_info = _fake_network_info(self.stubs, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEquals(len(rulesv4), 2)
self.assertEquals(len(rulesv6), 0)
    def test_multinic_iptables(self):
        """Rule counts scale with networks and addresses per network.

        Compares the number of per-network rules added by
        prepare_instance_filter against the expected
        rules-per-address * addresses * networks product.
        """
        ipv4_rules_per_addr = 1
        ipv4_addr_per_network = 2
        ipv6_rules_per_addr = 1
        ipv6_addr_per_network = 1
        networks_count = 5
        instance_ref = self._create_instance_ref()
        network_info = _fake_network_info(self.stubs, networks_count,
                                          ipv4_addr_per_network)
        # Baseline rule counts before the instance filter is prepared.
        ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
        ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
        inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
                                                      network_info)
        self.fw.prepare_instance_filter(instance_ref, network_info)
        ipv4 = self.fw.iptables.ipv4['filter'].rules
        ipv6 = self.fw.iptables.ipv6['filter'].rules
        ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
        ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
        # Extra rules are for the DHCP request
        rules = (ipv4_rules_per_addr * ipv4_addr_per_network *
                 networks_count) + 2
        self.assertEquals(ipv4_network_rules, rules)
        self.assertEquals(ipv6_network_rules,
                  ipv6_rules_per_addr * ipv6_addr_per_network * networks_count)
    def test_do_refresh_security_group_rules(self):
        """Refreshing group rules re-generates and re-applies instance rules.

        Records two rounds of instance_rules/add_filters_for_instance with
        mox (one for prepare_instance_filter, one for the refresh) and lets
        ReplayAll verify the call sequence.
        """
        instance_ref = self._create_instance_ref()
        self.mox.StubOutWithMock(self.fw,
                                 'instance_rules')
        self.mox.StubOutWithMock(self.fw,
                                 'add_filters_for_instance',
                                 use_mock_anything=True)
        # First round: triggered by prepare_instance_filter below.
        self.fw.instance_rules(instance_ref,
                               mox.IgnoreArg()).AndReturn((None, None))
        self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg(),
                                         mox.IgnoreArg())
        # Second round: triggered by do_refresh_security_group_rules.
        self.fw.instance_rules(instance_ref,
                               mox.IgnoreArg()).AndReturn((None, None))
        self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg(),
                                         mox.IgnoreArg())
        self.mox.ReplayAll()
        self.fw.prepare_instance_filter(instance_ref, mox.IgnoreArg())
        self.fw.instances[instance_ref['id']] = instance_ref
        self.fw.do_refresh_security_group_rules("fake")
    def test_unfilter_instance_undefines_nwfilter(self):
        """unfilter_instance removes exactly one libvirt nwfilter.

        Uses NWFilterFakes to track defined filters, sets up and applies
        the instance filter, then checks that unfiltering undefines a
        single filter (the instance's own).
        """
        admin_ctxt = context.get_admin_context()
        fakefilter = NWFilterFakes()
        _xml_mock = fakefilter.filterDefineXMLMock
        self.fw.nwfilter._conn.nwfilterDefineXML = _xml_mock
        _lookup_name = fakefilter.nwfilterLookupByName
        self.fw.nwfilter._conn.nwfilterLookupByName = _lookup_name
        instance_ref = self._create_instance_ref()
        network_info = _fake_network_info(self.stubs, 1)
        self.fw.setup_basic_filtering(instance_ref, network_info)
        self.fw.prepare_instance_filter(instance_ref, network_info)
        self.fw.apply_instance_filter(instance_ref, network_info)
        original_filter_count = len(fakefilter.filters)
        self.fw.unfilter_instance(instance_ref, network_info)
        # should undefine just the instance filter
        self.assertEqual(original_filter_count - len(fakefilter.filters), 1)
        db.instance_destroy(admin_ctxt, instance_ref['uuid'])
    def test_provider_firewall_rules(self):
        """Provider firewall rules are tracked in a dedicated chain.

        Verifies the 'provider' chain starts empty, grows/shrinks as
        provider rules are created/destroyed and refreshed, and that the
        instance chain gets exactly one jump rule into it.
        """
        # setup basic instance data
        instance_ref = self._create_instance_ref()
        # FRAGILE: peeks at how the firewall names chains
        chain_name = 'inst-%s' % instance_ref['id']
        # create a firewall via setup_basic_filtering like libvirt_conn.spawn
        # should have a chain with 0 rules
        network_info = _fake_network_info(self.stubs, 1)
        self.fw.setup_basic_filtering(instance_ref, network_info)
        self.assertTrue('provider' in self.fw.iptables.ipv4['filter'].chains)
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                      if rule.chain == 'provider']
        self.assertEqual(0, len(rules))
        admin_ctxt = context.get_admin_context()
        # add a rule and send the update message, check for 1 rule
        provider_fw0 = db.provider_fw_rule_create(admin_ctxt,
                                                  {'protocol': 'tcp',
                                                   'cidr': '10.99.99.99/32',
                                                   'from_port': 1,
                                                   'to_port': 65535})
        self.fw.refresh_provider_fw_rules()
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                      if rule.chain == 'provider']
        self.assertEqual(1, len(rules))
        # Add another, refresh, and make sure number of rules goes to two
        provider_fw1 = db.provider_fw_rule_create(admin_ctxt,
                                                  {'protocol': 'udp',
                                                   'cidr': '10.99.99.99/32',
                                                   'from_port': 1,
                                                   'to_port': 65535})
        self.fw.refresh_provider_fw_rules()
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                      if rule.chain == 'provider']
        self.assertEqual(2, len(rules))
        # create the instance filter and make sure it has a jump rule
        self.fw.prepare_instance_filter(instance_ref, network_info)
        self.fw.apply_instance_filter(instance_ref, network_info)
        inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                           if rule.chain == chain_name]
        jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
        provjump_rules = []
        # IptablesTable doesn't make rules unique internally
        for rule in jump_rules:
            if 'provider' in rule.rule and rule not in provjump_rules:
                provjump_rules.append(rule)
        self.assertEqual(1, len(provjump_rules))
        # remove a rule from the db, cast to compute to refresh rule
        db.provider_fw_rule_destroy(admin_ctxt, provider_fw1['id'])
        self.fw.refresh_provider_fw_rules()
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                      if rule.chain == 'provider']
        self.assertEqual(1, len(rules))
class NWFilterTestCase(test.TestCase):
    """Tests for the libvirt NWFilterFirewall driver."""
    def setUp(self):
        """Wire the firewall driver to a bare mock libvirt connection."""
        super(NWFilterTestCase, self).setUp()
        class Mock(object):
            pass
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)
        self.fake_libvirt_connection = Mock()
        self.fw = firewall.NWFilterFirewall(fake.FakeVirtAPI(),
                                         lambda: self.fake_libvirt_connection)
    def test_cidr_rule_nwfilter_xml(self):
        """Creating/authorizing a CIDR-based group succeeds end-to-end."""
        cloud_controller = cloud.CloudController()
        cloud_controller.create_security_group(self.context,
                                               'testgroup',
                                               'test group description')
        cloud_controller.authorize_security_group_ingress(self.context,
                                                          'testgroup',
                                                          from_port='80',
                                                          to_port='81',
                                                          ip_protocol='tcp',
                                                          cidr_ip='0.0.0.0/0')
        security_group = db.security_group_get_by_name(self.context,
                                                       'fake',
                                                       'testgroup')
        self.teardown_security_group()
    def teardown_security_group(self):
        """Delete the 'testgroup' security group created by the tests."""
        cloud_controller = cloud.CloudController()
        cloud_controller.delete_security_group(self.context, 'testgroup')
    def setup_and_return_security_group(self):
        """Create 'testgroup' with a tcp 80-81 rule and return its record."""
        cloud_controller = cloud.CloudController()
        cloud_controller.create_security_group(self.context,
                                               'testgroup',
                                               'test group description')
        cloud_controller.authorize_security_group_ingress(self.context,
                                                          'testgroup',
                                                          from_port='80',
                                                          to_port='81',
                                                          ip_protocol='tcp',
                                                          cidr_ip='0.0.0.0/0')
        return db.security_group_get_by_name(self.context, 'fake', 'testgroup')
    def _create_instance(self):
        """Insert a minimal fake instance row and return its db record."""
        return db.instance_create(self.context,
                                  {'user_id': 'fake',
                                   'project_id': 'fake',
                                   'instance_type_id': 1})
    def _create_instance_type(self, params=None):
        """Create a test instance type and return its id."""
        if not params:
            params = {}
        context = self.context.elevated()
        inst = {}
        inst['name'] = 'm1.small'
        inst['memory_mb'] = '1024'
        inst['vcpus'] = '1'
        inst['root_gb'] = '10'
        inst['ephemeral_gb'] = '20'
        inst['flavorid'] = '1'
        inst['swap'] = '2048'
        inst['rxtx_factor'] = 1
        inst.update(params)
        return db.instance_type_create(context, inst)['id']
    def test_creates_base_rule_first(self):
        """Filters are defined strictly after the filters they reference.

        The mocked nwfilterDefineXML asserts every <filterref> points at a
        filter that is already defined, so base rules must come first.
        """
        # These come pre-defined by libvirt
        self.defined_filters = ['no-mac-spoofing',
                                'no-ip-spoofing',
                                'no-arp-spoofing',
                                'allow-dhcp-server']
        self.recursive_depends = {}
        for f in self.defined_filters:
            self.recursive_depends[f] = []
        def _filterDefineXMLMock(xml):
            # Record the new filter and its (transitive) dependencies,
            # failing if it references a not-yet-defined filter.
            dom = minidom.parseString(xml)
            name = dom.firstChild.getAttribute('name')
            self.recursive_depends[name] = []
            for f in dom.getElementsByTagName('filterref'):
                ref = f.getAttribute('filter')
                self.assertTrue(ref in self.defined_filters,
                                ('%s referenced filter that does ' +
                                'not yet exist: %s') % (name, ref))
                dependencies = [ref] + self.recursive_depends[ref]
                self.recursive_depends[name] += dependencies
            self.defined_filters.append(name)
            return True
        self.fake_libvirt_connection.nwfilterDefineXML = _filterDefineXMLMock
        instance_ref = self._create_instance()
        inst_id = instance_ref['id']
        inst_uuid = instance_ref['uuid']
        def _ensure_all_called(mac, allow_dhcp):
            # The per-instance filter must depend on all base filters.
            instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'],
                                                   mac.translate(None, ':'))
            requiredlist = ['no-arp-spoofing', 'no-ip-spoofing',
                            'no-mac-spoofing']
            if allow_dhcp:
                requiredlist.append('allow-dhcp-server')
            for required in requiredlist:
                self.assertTrue(required in
                                self.recursive_depends[instance_filter],
                                "Instance's filter does not include %s" %
                                required)
        self.security_group = self.setup_and_return_security_group()
        db.instance_add_security_group(self.context, inst_uuid,
                                       self.security_group['id'])
        instance = db.instance_get(self.context, inst_id)
        network_info = _fake_network_info(self.stubs, 1)
        # since there is one (network_info) there is one vif
        # pass this vif's mac to _ensure_all_called()
        # to set the instance_filter properly
        mac = network_info[0][1]['mac']
        self.fw.setup_basic_filtering(instance, network_info)
        allow_dhcp = False
        for (network, mapping) in network_info:
            if mapping['dhcp_server']:
                allow_dhcp = True
                break
        _ensure_all_called(mac, allow_dhcp)
        db.instance_remove_security_group(self.context, inst_uuid,
                                          self.security_group['id'])
        self.teardown_security_group()
        db.instance_destroy(context.get_admin_context(), instance_ref['uuid'])
    def test_unfilter_instance_undefines_nwfilters(self):
        """unfilter_instance undefines exactly one nwfilter."""
        admin_ctxt = context.get_admin_context()
        fakefilter = NWFilterFakes()
        self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
        self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName
        instance_ref = self._create_instance()
        inst_id = instance_ref['id']
        inst_uuid = instance_ref['uuid']
        self.security_group = self.setup_and_return_security_group()
        db.instance_add_security_group(self.context, inst_uuid,
                                       self.security_group['id'])
        instance = db.instance_get(self.context, inst_id)
        network_info = _fake_network_info(self.stubs, 1)
        self.fw.setup_basic_filtering(instance, network_info)
        original_filter_count = len(fakefilter.filters)
        self.fw.unfilter_instance(instance, network_info)
        self.assertEqual(original_filter_count - len(fakefilter.filters), 1)
        db.instance_destroy(admin_ctxt, instance_ref['uuid'])
    def test_nwfilter_parameters(self):
        """Every <parameter> in the generated filterref XML is valid.

        Walks the instance filter's filterref parameters and checks each
        known name (IP, DHCPSERVER, RASERVER, PROJNET/6, PROJMASK/6)
        against the fake network info; unknown names raise.
        """
        admin_ctxt = context.get_admin_context()
        fakefilter = NWFilterFakes()
        self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
        self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName
        instance_ref = self._create_instance()
        inst_id = instance_ref['id']
        inst_uuid = instance_ref['uuid']
        self.security_group = self.setup_and_return_security_group()
        db.instance_add_security_group(self.context, inst_uuid,
                                       self.security_group['id'])
        instance = db.instance_get(self.context, inst_id)
        network_info = _fake_network_info(self.stubs, 1)
        self.fw.setup_basic_filtering(instance, network_info)
        (network, mapping) = network_info[0]
        nic_id = mapping['mac'].replace(':', '')
        instance_filter_name = self.fw._instance_filter_name(instance, nic_id)
        f = fakefilter.nwfilterLookupByName(instance_filter_name)
        tree = etree.fromstring(f.xml)
        for fref in tree.findall('filterref'):
            parameters = fref.findall('./parameter')
            for parameter in parameters:
                if parameter.get('name') == 'IP':
                    self.assertTrue(_ipv4_like(parameter.get('value'),
                                               '192.168'))
                elif parameter.get('name') == 'DHCPSERVER':
                    dhcp_server = mapping['dhcp_server']
                    self.assertEqual(parameter.get('value'), dhcp_server)
                elif parameter.get('name') == 'RASERVER':
                    ra_server = mapping.get('gateway_v6') + "/128"
                    self.assertEqual(parameter.get('value'), ra_server)
                elif parameter.get('name') == 'PROJNET':
                    ipv4_cidr = network['cidr']
                    net, mask = netutils.get_net_and_mask(ipv4_cidr)
                    self.assertEqual(parameter.get('value'), net)
                elif parameter.get('name') == 'PROJMASK':
                    ipv4_cidr = network['cidr']
                    net, mask = netutils.get_net_and_mask(ipv4_cidr)
                    self.assertEqual(parameter.get('value'), mask)
                elif parameter.get('name') == 'PROJNET6':
                    ipv6_cidr = network['cidr_v6']
                    net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
                    self.assertEqual(parameter.get('value'), net)
                elif parameter.get('name') == 'PROJMASK6':
                    ipv6_cidr = network['cidr_v6']
                    net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
                    self.assertEqual(parameter.get('value'), prefix)
                else:
                    raise exception.InvalidParameterValue('unknown parameter '
                                                          'in filter')
        db.instance_destroy(admin_ctxt, instance_ref['uuid'])
class LibvirtUtilsTestCase(test.TestCase):
    """Tests for nova.virt.libvirt.utils helpers (qemu-img, file ops)."""
    def test_get_iscsi_initiator(self):
        """Initiator IQN is parsed out of /etc/iscsi/initiatorname.iscsi."""
        self.mox.StubOutWithMock(utils, 'execute')
        initiator = 'fake.initiator.iqn'
        rval = ("junk\nInitiatorName=%s\njunk\n" % initiator, None)
        utils.execute('cat', '/etc/iscsi/initiatorname.iscsi',
                      run_as_root=True).AndReturn(rval)
        # Start test
        self.mox.ReplayAll()
        result = libvirt_utils.get_iscsi_initiator()
        self.assertEqual(initiator, result)
    def test_get_missing_iscsi_initiator(self):
        """A missing initiatorname file yields None, not an exception."""
        self.mox.StubOutWithMock(utils, 'execute')
        file_path = '/etc/iscsi/initiatorname.iscsi'
        utils.execute('cat', file_path, run_as_root=True).AndRaise(
            exception.FileNotFound(file_path=file_path)
        )
        # Start test
        self.mox.ReplayAll()
        result = libvirt_utils.get_iscsi_initiator()
        self.assertIsNone(result)
    def test_create_image(self):
        """create_image invokes qemu-img create with format/path/size."""
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('qemu-img', 'create', '-f', 'raw',
                      '/some/path', '10G')
        utils.execute('qemu-img', 'create', '-f', 'qcow2',
                      '/some/stuff', '1234567891234')
        # Start test
        self.mox.ReplayAll()
        libvirt_utils.create_image('raw', '/some/path', '10G')
        libvirt_utils.create_image('qcow2', '/some/stuff', '1234567891234')
    def test_create_cow_image(self):
        """create_cow_image inspects the backing file then creates a qcow2."""
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(utils, 'execute')
        rval = ('', '')
        os.path.exists('/some/path').AndReturn(True)
        utils.execute('env', 'LC_ALL=C', 'LANG=C',
                      'qemu-img', 'info', '/some/path').AndReturn(rval)
        utils.execute('qemu-img', 'create', '-f', 'qcow2',
                      '-o', 'backing_file=/some/path',
                      '/the/new/cow')
        # Start test
        self.mox.ReplayAll()
        libvirt_utils.create_cow_image('/some/path', '/the/new/cow')
    def test_pick_disk_driver_name(self):
        """Driver name depends on libvirt_type and block-device-ness."""
        # libvirt_type -> ((is_block_dev, expected_driver), ...)
        type_map = {'kvm': ([True, 'qemu'], [False, 'qemu'], [None, 'qemu']),
                    'qemu': ([True, 'qemu'], [False, 'qemu'], [None, 'qemu']),
                    'xen': ([True, 'phy'], [False, 'tap'], [None, 'tap']),
                    'uml': ([True, None], [False, None], [None, None]),
                    'lxc': ([True, None], [False, None], [None, None])}
        for (libvirt_type, checks) in type_map.iteritems():
            self.flags(libvirt_type=libvirt_type)
            for (is_block_dev, expected_result) in checks:
                result = libvirt_utils.pick_disk_driver_name(is_block_dev)
                self.assertEquals(result, expected_result)
    def test_get_disk_size(self):
        """Virtual size in bytes is parsed from qemu-img info output."""
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(utils, 'execute')
        os.path.exists('/some/path').AndReturn(True)
        utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
                      '/some/path').AndReturn(('''image: 00000001
file format: raw
virtual size: 4.4M (4592640 bytes)
disk size: 4.4M''', ''))
        # Start test
        self.mox.ReplayAll()
        self.assertEquals(disk.get_disk_size('/some/path'), 4592640)
    def test_copy_image(self):
        """copy_image duplicates file contents between real temp files."""
        dst_fd, dst_path = tempfile.mkstemp()
        try:
            os.close(dst_fd)
            src_fd, src_path = tempfile.mkstemp()
            try:
                with os.fdopen(src_fd, 'w') as fp:
                    fp.write('canary')
                libvirt_utils.copy_image(src_path, dst_path)
                with open(dst_path, 'r') as fp:
                    self.assertEquals(fp.read(), 'canary')
            finally:
                os.unlink(src_path)
        finally:
            os.unlink(dst_path)
    def test_write_to_file(self):
        """write_to_file writes the given contents verbatim."""
        dst_fd, dst_path = tempfile.mkstemp()
        try:
            os.close(dst_fd)
            libvirt_utils.write_to_file(dst_path, 'hello')
            with open(dst_path, 'r') as fp:
                self.assertEquals(fp.read(), 'hello')
        finally:
            os.unlink(dst_path)
    def test_write_to_file_with_umask(self):
        """write_to_file honors the umask when creating the file."""
        dst_fd, dst_path = tempfile.mkstemp()
        try:
            os.close(dst_fd)
            # Remove the file so write_to_file creates it under the umask.
            os.unlink(dst_path)
            libvirt_utils.write_to_file(dst_path, 'hello', umask=0277)
            with open(dst_path, 'r') as fp:
                self.assertEquals(fp.read(), 'hello')
            mode = os.stat(dst_path).st_mode
            # No bit masked by the umask may be set on the result.
            self.assertEquals(mode & 0277, 0)
        finally:
            os.unlink(dst_path)
    def test_chown(self):
        """chown shells out to chown as root."""
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('chown', 'soren', '/some/path', run_as_root=True)
        self.mox.ReplayAll()
        libvirt_utils.chown('/some/path', 'soren')
    def _do_test_extract_snapshot(self, dest_format='raw', out_format='raw'):
        """Shared body: extract_snapshot converts via qemu-img convert."""
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('qemu-img', 'convert', '-f', 'qcow2', '-O', out_format,
                      '-s', 'snap1', '/path/to/disk/image', '/extracted/snap')
        # Start test
        self.mox.ReplayAll()
        libvirt_utils.extract_snapshot('/path/to/disk/image', 'qcow2',
                                       'snap1', '/extracted/snap', dest_format)
    def test_extract_snapshot_raw(self):
        self._do_test_extract_snapshot()
    def test_extract_snapshot_iso(self):
        # iso output still converts to raw on disk.
        self._do_test_extract_snapshot(dest_format='iso')
    def test_extract_snapshot_qcow2(self):
        self._do_test_extract_snapshot(dest_format='qcow2', out_format='qcow2')
    def test_load_file(self):
        """load_file round-trips what write_to_file wrote."""
        dst_fd, dst_path = tempfile.mkstemp()
        try:
            os.close(dst_fd)
            # We have a test for write_to_file. If that is sound, this suffices
            libvirt_utils.write_to_file(dst_path, 'hello')
            self.assertEquals(libvirt_utils.load_file(dst_path), 'hello')
        finally:
            os.unlink(dst_path)
    def test_file_open(self):
        """file_open returns a usable file object."""
        dst_fd, dst_path = tempfile.mkstemp()
        try:
            os.close(dst_fd)
            # We have a test for write_to_file. If that is sound, this suffices
            libvirt_utils.write_to_file(dst_path, 'hello')
            with libvirt_utils.file_open(dst_path, 'r') as fp:
                self.assertEquals(fp.read(), 'hello')
        finally:
            os.unlink(dst_path)
    def test_get_fs_info(self):
        """get_fs_info derives total/free/used bytes from statvfs."""
        class FakeStatResult(object):
            # Mimics os.statvfs_result with fixed block counts.
            def __init__(self):
                self.f_bsize = 4096
                self.f_frsize = 4096
                self.f_blocks = 2000
                self.f_bfree = 1000
                self.f_bavail = 900
                self.f_files = 2000
                self.f_ffree = 1000
                self.f_favail = 900
                self.f_flag = 4096
                self.f_namemax = 255
        self.path = None
        def fake_statvfs(path):
            self.path = path
            return FakeStatResult()
        self.stubs.Set(os, 'statvfs', fake_statvfs)
        fs_info = libvirt_utils.get_fs_info('/some/file/path')
        self.assertEquals('/some/file/path', self.path)
        # total = frsize * blocks; free = frsize * bavail (non-root);
        # used = frsize * (blocks - bfree)
        self.assertEquals(8192000, fs_info['total'])
        self.assertEquals(3686400, fs_info['free'])
        self.assertEquals(4096000, fs_info['used'])
    def test_fetch_image(self):
        """fetch_image delegates to images.fetch_to_raw."""
        self.mox.StubOutWithMock(images, 'fetch_to_raw')
        context = 'opaque context'
        target = '/tmp/targetfile'
        image_id = '4'
        user_id = 'fake'
        project_id = 'fake'
        images.fetch_to_raw(context, image_id, target, user_id, project_id)
        self.mox.ReplayAll()
        libvirt_utils.fetch_image(context, target, image_id,
                                  user_id, project_id)
    def test_fetch_raw_image(self):
        """fetch_to_raw converts non-raw images and rejects backing files.

        The fake qemu_img_info derives the 'detected' format from the file
        extension, so a .qcow2 target is converted, a .raw target is just
        renamed, and anything with a backing file is rejected (with the
        partial download removed).
        """
        def fake_execute(*cmd, **kwargs):
            self.executes.append(cmd)
            return None, None
        def fake_rename(old, new):
            self.executes.append(('mv', old, new))
        def fake_unlink(path):
            self.executes.append(('rm', path))
        def fake_rm_on_errror(path):
            self.executes.append(('rm', '-f', path))
        def fake_qemu_img_info(path):
            class FakeImgInfo(object):
                pass
            file_format = path.split('.')[-1]
            if file_format == 'part':
                file_format = path.split('.')[-2]
            elif file_format == 'converted':
                file_format = 'raw'
            if 'backing' in path:
                backing_file = 'backing'
            else:
                backing_file = None
            FakeImgInfo.file_format = file_format
            FakeImgInfo.backing_file = backing_file
            return FakeImgInfo()
        self.stubs.Set(utils, 'execute', fake_execute)
        self.stubs.Set(os, 'rename', fake_rename)
        self.stubs.Set(os, 'unlink', fake_unlink)
        self.stubs.Set(images, 'fetch', lambda *_: None)
        self.stubs.Set(images, 'qemu_img_info', fake_qemu_img_info)
        self.stubs.Set(utils, 'delete_if_exists', fake_rm_on_errror)
        context = 'opaque context'
        image_id = '4'
        user_id = 'fake'
        project_id = 'fake'
        target = 't.qcow2'
        self.executes = []
        expected_commands = [('qemu-img', 'convert', '-O', 'raw',
                              't.qcow2.part', 't.qcow2.converted'),
                             ('rm', 't.qcow2.part'),
                             ('mv', 't.qcow2.converted', 't.qcow2')]
        images.fetch_to_raw(context, image_id, target, user_id, project_id)
        self.assertEqual(self.executes, expected_commands)
        target = 't.raw'
        self.executes = []
        expected_commands = [('mv', 't.raw.part', 't.raw')]
        images.fetch_to_raw(context, image_id, target, user_id, project_id)
        self.assertEqual(self.executes, expected_commands)
        target = 'backing.qcow2'
        self.executes = []
        expected_commands = [('rm', '-f', 'backing.qcow2.part')]
        self.assertRaises(exception.ImageUnacceptable,
                          images.fetch_to_raw,
                          context, image_id, target, user_id, project_id)
        self.assertEqual(self.executes, expected_commands)
        del self.executes
    def test_get_disk_backing_file(self):
        """Backing file basename is parsed from qemu-img info output.

        When qemu-img reports an '(actual path: ...)' suffix, the actual
        path's basename wins.
        """
        with_actual_path = False
        def fake_execute(*args, **kwargs):
            if with_actual_path:
                return ("some: output\n"
                        "backing file: /foo/bar/baz (actual path: /a/b/c)\n"
                        "...: ...\n"), ''
            else:
                return ("some: output\n"
                        "backing file: /foo/bar/baz\n"
                        "...: ...\n"), ''
        def return_true(*args, **kwargs):
            return True
        self.stubs.Set(utils, 'execute', fake_execute)
        self.stubs.Set(os.path, 'exists', return_true)
        out = libvirt_utils.get_disk_backing_file('')
        self.assertEqual(out, 'baz')
        with_actual_path = True
        out = libvirt_utils.get_disk_backing_file('')
        self.assertEqual(out, 'c')
class LibvirtDriverTestCase(test.TestCase):
"""Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver."""
    def setUp(self):
        """Build a read-only LibvirtDriver backed by the fake virt API."""
        super(LibvirtDriverTestCase, self).setUp()
        self.libvirtconnection = libvirt_driver.LibvirtDriver(
            fake.FakeVirtAPI(), read_only=True)
def _create_instance(self, params=None):
"""Create a test instance."""
if not params:
params = {}
sys_meta = instance_types.save_instance_type_info(
{}, instance_types.get_instance_type_by_name('m1.tiny'))
inst = {}
inst['image_ref'] = '1'
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = 'fake'
inst['project_id'] = 'fake'
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
inst['host'] = 'host1'
inst['root_gb'] = 10
inst['ephemeral_gb'] = 20
inst['config_drive'] = 1
inst['kernel_id'] = 2
inst['ramdisk_id'] = 3
inst['config_drive_id'] = 1
inst['key_data'] = 'ABCDEFG'
inst['system_metadata'] = sys_meta
inst.update(params)
return db.instance_create(context.get_admin_context(), inst)
    def test_migrate_disk_and_power_off_exception(self):
        """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
        .migrate_disk_and_power_off.

        The first utils.execute call raises, and the error must propagate
        out of migrate_disk_and_power_off.
        """
        self.counter = 0
        self.checked_shared_storage = False
        def fake_get_instance_disk_info(instance, xml=None,
                                        block_device_info=None):
            return '[]'
        def fake_destroy(instance):
            pass
        def fake_get_host_ip_addr():
            return '10.0.0.1'
        def fake_execute(*args, **kwargs):
            # Fail on the very first shell-out.
            self.counter += 1
            if self.counter == 1:
                assert False, "intentional failure"
        def fake_os_path_exists(path):
            return True
        def fake_is_storage_shared(dest, inst_base):
            self.checked_shared_storage = True
            return False
        self.stubs.Set(self.libvirtconnection, 'get_instance_disk_info',
                       fake_get_instance_disk_info)
        self.stubs.Set(self.libvirtconnection, '_destroy', fake_destroy)
        self.stubs.Set(self.libvirtconnection, 'get_host_ip_addr',
                       fake_get_host_ip_addr)
        self.stubs.Set(self.libvirtconnection, '_is_storage_shared_with',
                       fake_is_storage_shared)
        self.stubs.Set(utils, 'execute', fake_execute)
        self.stubs.Set(os.path, 'exists', fake_os_path_exists)
        ins_ref = self._create_instance()
        self.assertRaises(AssertionError,
                          self.libvirtconnection.migrate_disk_and_power_off,
                          None, ins_ref, '10.0.0.2', None, None)
    def test_migrate_disk_and_power_off(self):
        """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
        .migrate_disk_and_power_off.

        Happy path: with all external effects stubbed out, the call returns
        the instance's serialized disk info for both a remote and the
        local destination host.
        """
        disk_info = [{'type': 'qcow2', 'path': '/test/disk',
                      'virt_disk_size': '10737418240',
                      'backing_file': '/base/disk',
                      'disk_size': '83886080'},
                     {'type': 'raw', 'path': '/test/disk.local',
                      'virt_disk_size': '10737418240',
                      'backing_file': '/base/disk.local',
                      'disk_size': '83886080'}]
        disk_info_text = jsonutils.dumps(disk_info)
        def fake_get_instance_disk_info(instance, xml=None,
                                        block_device_info=None):
            return disk_info_text
        def fake_destroy(instance):
            pass
        def fake_get_host_ip_addr():
            return '10.0.0.1'
        def fake_execute(*args, **kwargs):
            pass
        self.stubs.Set(self.libvirtconnection, 'get_instance_disk_info',
                       fake_get_instance_disk_info)
        self.stubs.Set(self.libvirtconnection, '_destroy', fake_destroy)
        self.stubs.Set(self.libvirtconnection, 'get_host_ip_addr',
                       fake_get_host_ip_addr)
        self.stubs.Set(utils, 'execute', fake_execute)
        ins_ref = self._create_instance()
        # dest is different host case
        out = self.libvirtconnection.migrate_disk_and_power_off(
               None, ins_ref, '10.0.0.2', None, None)
        self.assertEquals(out, disk_info_text)
        # dest is same host case
        out = self.libvirtconnection.migrate_disk_and_power_off(
               None, ins_ref, '10.0.0.1', None, None)
        self.assertEquals(out, disk_info_text)
    def test_wait_for_running(self):
        """_wait_for_running: NotFound propagates, RUNNING signals done.

        A missing instance re-raises NotFound, a running instance raises
        LoopingCallDone to stop the polling loop, and any other state
        returns normally so polling continues.
        """
        def fake_get_info(instance):
            if instance['name'] == "not_found":
                raise exception.NotFound
            elif instance['name'] == "running":
                return {'state': power_state.RUNNING}
            else:
                return {'state': power_state.SHUTDOWN}
        self.stubs.Set(self.libvirtconnection, 'get_info',
                       fake_get_info)
        # instance not found case
        self.assertRaises(exception.NotFound,
                          self.libvirtconnection._wait_for_running,
                          {'name': 'not_found',
                           'uuid': 'not_found_uuid'})
        # instance is running case
        self.assertRaises(utils.LoopingCallDone,
                          self.libvirtconnection._wait_for_running,
                          {'name': 'running',
                           'uuid': 'running_uuid'})
        # else case
        self.libvirtconnection._wait_for_running({'name': 'else',
                                                  'uuid': 'other_uuid'})
    def test_finish_migration(self):
        """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
        .finish_migration.

        All image/domain/vif side effects are stubbed; the call must
        complete without error for a cow-image configuration.
        """
        disk_info = [{'type': 'qcow2', 'path': '/test/disk',
                      'local_gb': 10, 'backing_file': '/base/disk'},
                     {'type': 'raw', 'path': '/test/disk.local',
                      'local_gb': 10, 'backing_file': '/base/disk.local'}]
        disk_info_text = jsonutils.dumps(disk_info)
        def fake_can_resize_fs(path, size, use_cow=False):
            return False
        def fake_extend(path, size):
            pass
        def fake_to_xml(instance, network_info, disk_info,
                        image_meta=None, rescue=None,
                        block_device_info=None, write_to_disk=False):
            return ""
        def fake_plug_vifs(instance, network_info):
            pass
        def fake_create_image(context, inst,
                              disk_mapping, suffix='',
                              disk_images=None, network_info=None,
                              block_device_info=None):
            pass
        def fake_create_domain(xml, instance=None):
            return None
        def fake_enable_hairpin(instance):
            pass
        def fake_execute(*args, **kwargs):
            pass
        def fake_get_info(instance):
            # Report RUNNING so the post-boot wait loop exits immediately.
            return {'state': power_state.RUNNING}
        self.flags(use_cow_images=True)
        self.stubs.Set(libvirt_driver.disk, 'extend', fake_extend)
        self.stubs.Set(libvirt_driver.disk, 'can_resize_fs',
                       fake_can_resize_fs)
        self.stubs.Set(self.libvirtconnection, 'to_xml', fake_to_xml)
        self.stubs.Set(self.libvirtconnection, 'plug_vifs', fake_plug_vifs)
        self.stubs.Set(self.libvirtconnection, '_create_image',
                       fake_create_image)
        self.stubs.Set(self.libvirtconnection, '_create_domain',
                       fake_create_domain)
        self.stubs.Set(self.libvirtconnection, '_enable_hairpin',
                       fake_enable_hairpin)
        self.stubs.Set(utils, 'execute', fake_execute)
        fw = base_firewall.NoopFirewallDriver()
        self.stubs.Set(self.libvirtconnection, 'firewall_driver', fw)
        self.stubs.Set(self.libvirtconnection, 'get_info',
                       fake_get_info)
        ins_ref = self._create_instance()
        self.libvirtconnection.finish_migration(
                      context.get_admin_context(), None, ins_ref,
                      disk_info_text, None, None, None)
    def test_finish_revert_migration(self):
        """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
        .finish_revert_migration.

        With domain/vif/exec side effects stubbed and an (empty)
        libvirt.xml in place under instances_path, the revert must
        complete without error.
        """
        def fake_execute(*args, **kwargs):
            pass
        def fake_plug_vifs(instance, network_info):
            pass
        def fake_create_domain(xml, instance=None):
            return None
        def fake_enable_hairpin(instance):
            pass
        def fake_get_info(instance):
            return {'state': power_state.RUNNING}
        def fake_to_xml(instance, network_info, disk_info,
                        image_meta=None, rescue=None,
                        block_device_info=None):
            return ""
        self.stubs.Set(self.libvirtconnection, 'to_xml', fake_to_xml)
        self.stubs.Set(self.libvirtconnection, 'plug_vifs', fake_plug_vifs)
        self.stubs.Set(utils, 'execute', fake_execute)
        fw = base_firewall.NoopFirewallDriver()
        self.stubs.Set(self.libvirtconnection, 'firewall_driver', fw)
        self.stubs.Set(self.libvirtconnection, '_create_domain',
                       fake_create_domain)
        self.stubs.Set(self.libvirtconnection, '_enable_hairpin',
                       fake_enable_hairpin)
        self.stubs.Set(self.libvirtconnection, 'get_info',
                       fake_get_info)
        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            ins_ref = self._create_instance()
            os.mkdir(os.path.join(tmpdir, ins_ref['name']))
            libvirt_xml_path = os.path.join(tmpdir,
                                            ins_ref['name'],
                                            'libvirt.xml')
            # Touch an empty libvirt.xml for the driver to find.
            f = open(libvirt_xml_path, 'w')
            f.close()
            self.libvirtconnection.finish_revert_migration(ins_ref, None)
    def _test_finish_revert_migration_after_crash(self, backup_made, new_made):
        """Shared body for the crash-recovery revert tests.

        :param backup_made: whether the '<path>_resize' backup dir exists
        :param new_made: whether the new instance dir also exists (and so
                         must be removed before restoring the backup)
        """
        class FakeLoopingCall:
            # Stand-in for FixedIntervalLoopingCall: runs nothing.
            def start(self, *a, **k):
                return self
            def wait(self):
                return None
        self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(shutil, 'rmtree')
        self.mox.StubOutWithMock(utils, 'execute')
        self.stubs.Set(blockinfo, 'get_disk_info', lambda *a: None)
        self.stubs.Set(self.libvirtconnection, 'to_xml', lambda *a, **k: None)
        self.stubs.Set(self.libvirtconnection, '_create_domain_and_network',
                       lambda *a: None)
        self.stubs.Set(utils, 'FixedIntervalLoopingCall',
                       lambda *a, **k: FakeLoopingCall())
        # Expected recovery sequence, recorded in order for mox:
        libvirt_utils.get_instance_path({}).AndReturn('/fake/foo')
        os.path.exists('/fake/foo_resize').AndReturn(backup_made)
        if backup_made:
            os.path.exists('/fake/foo').AndReturn(new_made)
            if new_made:
                shutil.rmtree('/fake/foo')
            utils.execute('mv', '/fake/foo_resize', '/fake/foo')
        self.mox.ReplayAll()
        self.libvirtconnection.finish_revert_migration({}, [])
def test_finish_revert_migration_after_crash(self):
    """Crash after both the resize backup and the new dir were created."""
    self._test_finish_revert_migration_after_crash(True, True)

def test_finish_revert_migration_after_crash_before_new(self):
    """Crash after the resize backup was made, before the new dir existed."""
    self._test_finish_revert_migration_after_crash(True, False)

def test_finish_revert_migration_after_crash_before_backup(self):
    """Crash before the resize backup was ever created."""
    self._test_finish_revert_migration_after_crash(False, False)
def test_cleanup_failed_migration(self):
    """_cleanup_failed_migration must remove the given instance directory."""
    self.mox.StubOutWithMock(shutil, 'rmtree')
    # Expectation: exactly one recursive delete of the instance path.
    shutil.rmtree('/fake/inst')
    self.mox.ReplayAll()
    self.libvirtconnection._cleanup_failed_migration('/fake/inst')
def test_confirm_migration(self):
    """confirm_migration must delegate cleanup to _cleanup_resize."""
    ins_ref = self._create_instance()
    self.mox.StubOutWithMock(self.libvirtconnection, "_cleanup_resize")
    # Expectation: _cleanup_resize is invoked with the instance and its
    # network info (mox compares the arguments by equality).
    self.libvirtconnection._cleanup_resize(ins_ref,
                                           _fake_network_info(self.stubs, 1))
    self.mox.ReplayAll()
    self.libvirtconnection.confirm_migration("migration_ref", ins_ref,
                                             _fake_network_info(self.stubs, 1))
def test_cleanup_resize_same_host(self):
    """When the instance still lives on this host, _cleanup_resize only
    removes the leftover resize directory -- no domain/network teardown.
    """
    ins_ref = self._create_instance({'host': CONF.host})

    # Pretend the resize directory exists and make its removal a no-op.
    self.stubs.Set(os.path, 'exists', lambda path: True)
    self.stubs.Set(shutil, 'rmtree', lambda target: None)

    self.mox.ReplayAll()
    self.libvirtconnection._cleanup_resize(
        ins_ref, _fake_network_info(self.stubs, 1))
def test_cleanup_resize_not_same_host(self):
    """When the instance has moved to another host, _cleanup_resize must
    also undefine the domain, unplug VIFs and unfilter the instance.
    """
    other_host = 'not' + CONF.host
    ins_ref = self._create_instance({'host': other_host})

    # Filesystem probes succeed and the delete becomes a no-op.
    self.stubs.Set(os.path, 'exists', lambda path: True)
    self.stubs.Set(shutil, 'rmtree', lambda target: None)

    # The extra teardown hooks are stubbed out as inert callables.
    self.stubs.Set(self.libvirtconnection, '_undefine_domain',
                   lambda instance: None)
    self.stubs.Set(self.libvirtconnection, 'unplug_vifs',
                   lambda instance, network_info: None)
    self.stubs.Set(self.libvirtconnection.firewall_driver,
                   'unfilter_instance',
                   lambda instance, network_info: None)

    self.mox.ReplayAll()
    self.libvirtconnection._cleanup_resize(
        ins_ref, _fake_network_info(self.stubs, 1))
def test_get_instance_disk_info_exception(self):
    """A libvirt failure while fetching the domain XML must surface to
    the caller as InstanceNotFound.
    """
    instance_name = "fake-instance-name"

    class BrokenXMLDomain(FakeVirtDomain):
        # Simulate libvirt blowing up on the XMLDesc call.
        def XMLDesc(self, *args):
            raise libvirt.libvirtError("Libvirt error")

    self.stubs.Set(self.libvirtconnection, '_lookup_by_name',
                   lambda name: BrokenXMLDomain())
    self.assertRaises(exception.InstanceNotFound,
                      self.libvirtconnection.get_instance_disk_info,
                      instance_name)
class LibvirtVolumeUsageTestCase(test.TestCase):
    """Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver
    .get_all_volume_usage"""

    def setUp(self):
        super(LibvirtVolumeUsageTestCase, self).setUp()
        self.conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.c = context.get_admin_context()

        # creating instance
        inst = {}
        inst['uuid'] = '875a8070-d0b9-4949-8b31-104d125c9a64'
        self.ins_ref = db.instance_create(self.c, inst)

        # verify bootable volume device path also
        # (one full '/dev/...' path and one bare device name)
        self.bdms = [{'volume_id': 1,
                      'device_name': '/dev/vde'},
                     {'volume_id': 2,
                      'device_name': 'vda'}]

    def test_get_all_volume_usage(self):
        # Stub block_stats to return fixed counters; every BDM of the
        # instance should then be reported with those values.
        def fake_block_stats(instance_name, disk):
            return (169L, 688640L, 0L, 0L, -1L)

        self.stubs.Set(self.conn, 'block_stats', fake_block_stats)
        vol_usage = self.conn.get_all_volume_usage(self.c,
              [dict(instance=self.ins_ref, instance_bdms=self.bdms)])

        expected_usage = [{'volume': 1,
                           'instance': self.ins_ref,
                           'rd_bytes': 688640L, 'wr_req': 0L,
                           'flush_operations': -1L, 'rd_req': 169L,
                           'wr_bytes': 0L},
                          {'volume': 2,
                           'instance': self.ins_ref,
                           'rd_bytes': 688640L, 'wr_req': 0L,
                           'flush_operations': -1L, 'rd_req': 169L,
                           'wr_bytes': 0L}]
        self.assertEqual(vol_usage, expected_usage)

    def test_get_all_volume_usage_device_not_found(self):
        # If the domain lookup raises, usage collection yields no entries
        # rather than propagating the libvirt error.
        def fake_lookup(instance_name):
            raise libvirt.libvirtError('invalid path')

        self.stubs.Set(self.conn, '_lookup_by_name', fake_lookup)
        vol_usage = self.conn.get_all_volume_usage(self.c,
              [dict(instance=self.ins_ref, instance_bdms=self.bdms)])
        self.assertEqual(vol_usage, [])
class LibvirtNonblockingTestCase(test.TestCase):
    """Test libvirt_nonblocking option."""

    def setUp(self):
        super(LibvirtNonblockingTestCase, self).setUp()
        # Run against the libvirt 'test' driver with the non-blocking
        # (proxied) connection mode enabled.
        self.flags(libvirt_nonblocking=True, libvirt_uri="test:///default")

    def test_connection_to_primitive(self):
        # Test bug 962840.
        # Serializing the proxied connection object must not raise.
        import nova.virt.libvirt.driver as libvirt_driver
        connection = libvirt_driver.LibvirtDriver('')
        jsonutils.to_primitive(connection._conn, convert_instances=True)
| apache-2.0 |
clovett/MissionPlanner | Lib/encodings/cp850.py | 93 | 35059 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP850.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless CP850 codec backed by this module's charmap tables."""

    def encode(self, input, errors='strict'):
        # The C-level charmap codec does the byte-for-character lookup.
        return codecs.charmap_encode(input, errors, encoding_map)

    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental CP850 encoder; charmap coding carries no state."""

    def encode(self, input, final=False):
        encoded, _consumed = codecs.charmap_encode(input, self.errors,
                                                   encoding_map)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental CP850 decoder; charmap coding carries no state."""

    def decode(self, input, final=False):
        decoded, _consumed = codecs.charmap_decode(input, self.errors,
                                                   decoding_table)
        return decoded
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream writer: inherits encode() from Codec, buffering from
    # codecs.StreamWriter; no extra behaviour needed.
    pass

class StreamReader(Codec,codecs.StreamReader):
    # Stream reader: inherits decode() from Codec, buffering from
    # codecs.StreamReader; no extra behaviour needed.
    pass
### encodings module API
def getregentry():
    """Return the codecs.CodecInfo record used to register this codec."""
    codec = Codec()
    return codecs.CodecInfo(
        name='cp850',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Character Tables
#
# ``decoding_table`` (the 256-character string below) is the canonical
# data for this codec.  The dict forms that the rest of this module
# uses -- ``decoding_map`` and ``encoding_map`` -- are derived from it
# after the literal, instead of being maintained as two additional
# ~250-entry hand-written literals that could drift out of sync.

### Decoding Table
decoding_table = (
    u'\x00'     #  0x0000 -> NULL
    u'\x01'     #  0x0001 -> START OF HEADING
    u'\x02'     #  0x0002 -> START OF TEXT
    u'\x03'     #  0x0003 -> END OF TEXT
    u'\x04'     #  0x0004 -> END OF TRANSMISSION
    u'\x05'     #  0x0005 -> ENQUIRY
    u'\x06'     #  0x0006 -> ACKNOWLEDGE
    u'\x07'     #  0x0007 -> BELL
    u'\x08'     #  0x0008 -> BACKSPACE
    u'\t'       #  0x0009 -> HORIZONTAL TABULATION
    u'\n'       #  0x000a -> LINE FEED
    u'\x0b'     #  0x000b -> VERTICAL TABULATION
    u'\x0c'     #  0x000c -> FORM FEED
    u'\r'       #  0x000d -> CARRIAGE RETURN
    u'\x0e'     #  0x000e -> SHIFT OUT
    u'\x0f'     #  0x000f -> SHIFT IN
    u'\x10'     #  0x0010 -> DATA LINK ESCAPE
    u'\x11'     #  0x0011 -> DEVICE CONTROL ONE
    u'\x12'     #  0x0012 -> DEVICE CONTROL TWO
    u'\x13'     #  0x0013 -> DEVICE CONTROL THREE
    u'\x14'     #  0x0014 -> DEVICE CONTROL FOUR
    u'\x15'     #  0x0015 -> NEGATIVE ACKNOWLEDGE
    u'\x16'     #  0x0016 -> SYNCHRONOUS IDLE
    u'\x17'     #  0x0017 -> END OF TRANSMISSION BLOCK
    u'\x18'     #  0x0018 -> CANCEL
    u'\x19'     #  0x0019 -> END OF MEDIUM
    u'\x1a'     #  0x001a -> SUBSTITUTE
    u'\x1b'     #  0x001b -> ESCAPE
    u'\x1c'     #  0x001c -> FILE SEPARATOR
    u'\x1d'     #  0x001d -> GROUP SEPARATOR
    u'\x1e'     #  0x001e -> RECORD SEPARATOR
    u'\x1f'     #  0x001f -> UNIT SEPARATOR
    u' '        #  0x0020 -> SPACE
    u'!'        #  0x0021 -> EXCLAMATION MARK
    u'"'        #  0x0022 -> QUOTATION MARK
    u'#'        #  0x0023 -> NUMBER SIGN
    u'$'        #  0x0024 -> DOLLAR SIGN
    u'%'        #  0x0025 -> PERCENT SIGN
    u'&'        #  0x0026 -> AMPERSAND
    u"'"        #  0x0027 -> APOSTROPHE
    u'('        #  0x0028 -> LEFT PARENTHESIS
    u')'        #  0x0029 -> RIGHT PARENTHESIS
    u'*'        #  0x002a -> ASTERISK
    u'+'        #  0x002b -> PLUS SIGN
    u','        #  0x002c -> COMMA
    u'-'        #  0x002d -> HYPHEN-MINUS
    u'.'        #  0x002e -> FULL STOP
    u'/'        #  0x002f -> SOLIDUS
    u'0'        #  0x0030 -> DIGIT ZERO
    u'1'        #  0x0031 -> DIGIT ONE
    u'2'        #  0x0032 -> DIGIT TWO
    u'3'        #  0x0033 -> DIGIT THREE
    u'4'        #  0x0034 -> DIGIT FOUR
    u'5'        #  0x0035 -> DIGIT FIVE
    u'6'        #  0x0036 -> DIGIT SIX
    u'7'        #  0x0037 -> DIGIT SEVEN
    u'8'        #  0x0038 -> DIGIT EIGHT
    u'9'        #  0x0039 -> DIGIT NINE
    u':'        #  0x003a -> COLON
    u';'        #  0x003b -> SEMICOLON
    u'<'        #  0x003c -> LESS-THAN SIGN
    u'='        #  0x003d -> EQUALS SIGN
    u'>'        #  0x003e -> GREATER-THAN SIGN
    u'?'        #  0x003f -> QUESTION MARK
    u'@'        #  0x0040 -> COMMERCIAL AT
    u'A'        #  0x0041 -> LATIN CAPITAL LETTER A
    u'B'        #  0x0042 -> LATIN CAPITAL LETTER B
    u'C'        #  0x0043 -> LATIN CAPITAL LETTER C
    u'D'        #  0x0044 -> LATIN CAPITAL LETTER D
    u'E'        #  0x0045 -> LATIN CAPITAL LETTER E
    u'F'        #  0x0046 -> LATIN CAPITAL LETTER F
    u'G'        #  0x0047 -> LATIN CAPITAL LETTER G
    u'H'        #  0x0048 -> LATIN CAPITAL LETTER H
    u'I'        #  0x0049 -> LATIN CAPITAL LETTER I
    u'J'        #  0x004a -> LATIN CAPITAL LETTER J
    u'K'        #  0x004b -> LATIN CAPITAL LETTER K
    u'L'        #  0x004c -> LATIN CAPITAL LETTER L
    u'M'        #  0x004d -> LATIN CAPITAL LETTER M
    u'N'        #  0x004e -> LATIN CAPITAL LETTER N
    u'O'        #  0x004f -> LATIN CAPITAL LETTER O
    u'P'        #  0x0050 -> LATIN CAPITAL LETTER P
    u'Q'        #  0x0051 -> LATIN CAPITAL LETTER Q
    u'R'        #  0x0052 -> LATIN CAPITAL LETTER R
    u'S'        #  0x0053 -> LATIN CAPITAL LETTER S
    u'T'        #  0x0054 -> LATIN CAPITAL LETTER T
    u'U'        #  0x0055 -> LATIN CAPITAL LETTER U
    u'V'        #  0x0056 -> LATIN CAPITAL LETTER V
    u'W'        #  0x0057 -> LATIN CAPITAL LETTER W
    u'X'        #  0x0058 -> LATIN CAPITAL LETTER X
    u'Y'        #  0x0059 -> LATIN CAPITAL LETTER Y
    u'Z'        #  0x005a -> LATIN CAPITAL LETTER Z
    u'['        #  0x005b -> LEFT SQUARE BRACKET
    u'\\'       #  0x005c -> REVERSE SOLIDUS
    u']'        #  0x005d -> RIGHT SQUARE BRACKET
    u'^'        #  0x005e -> CIRCUMFLEX ACCENT
    u'_'        #  0x005f -> LOW LINE
    u'`'        #  0x0060 -> GRAVE ACCENT
    u'a'        #  0x0061 -> LATIN SMALL LETTER A
    u'b'        #  0x0062 -> LATIN SMALL LETTER B
    u'c'        #  0x0063 -> LATIN SMALL LETTER C
    u'd'        #  0x0064 -> LATIN SMALL LETTER D
    u'e'        #  0x0065 -> LATIN SMALL LETTER E
    u'f'        #  0x0066 -> LATIN SMALL LETTER F
    u'g'        #  0x0067 -> LATIN SMALL LETTER G
    u'h'        #  0x0068 -> LATIN SMALL LETTER H
    u'i'        #  0x0069 -> LATIN SMALL LETTER I
    u'j'        #  0x006a -> LATIN SMALL LETTER J
    u'k'        #  0x006b -> LATIN SMALL LETTER K
    u'l'        #  0x006c -> LATIN SMALL LETTER L
    u'm'        #  0x006d -> LATIN SMALL LETTER M
    u'n'        #  0x006e -> LATIN SMALL LETTER N
    u'o'        #  0x006f -> LATIN SMALL LETTER O
    u'p'        #  0x0070 -> LATIN SMALL LETTER P
    u'q'        #  0x0071 -> LATIN SMALL LETTER Q
    u'r'        #  0x0072 -> LATIN SMALL LETTER R
    u's'        #  0x0073 -> LATIN SMALL LETTER S
    u't'        #  0x0074 -> LATIN SMALL LETTER T
    u'u'        #  0x0075 -> LATIN SMALL LETTER U
    u'v'        #  0x0076 -> LATIN SMALL LETTER V
    u'w'        #  0x0077 -> LATIN SMALL LETTER W
    u'x'        #  0x0078 -> LATIN SMALL LETTER X
    u'y'        #  0x0079 -> LATIN SMALL LETTER Y
    u'z'        #  0x007a -> LATIN SMALL LETTER Z
    u'{'        #  0x007b -> LEFT CURLY BRACKET
    u'|'        #  0x007c -> VERTICAL LINE
    u'}'        #  0x007d -> RIGHT CURLY BRACKET
    u'~'        #  0x007e -> TILDE
    u'\x7f'     #  0x007f -> DELETE
    u'\xc7'     #  0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
    u'\xfc'     #  0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
    u'\xe9'     #  0x0082 -> LATIN SMALL LETTER E WITH ACUTE
    u'\xe2'     #  0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
    u'\xe4'     #  0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
    u'\xe0'     #  0x0085 -> LATIN SMALL LETTER A WITH GRAVE
    u'\xe5'     #  0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
    u'\xe7'     #  0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
    u'\xea'     #  0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
    u'\xeb'     #  0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
    u'\xe8'     #  0x008a -> LATIN SMALL LETTER E WITH GRAVE
    u'\xef'     #  0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
    u'\xee'     #  0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
    u'\xec'     #  0x008d -> LATIN SMALL LETTER I WITH GRAVE
    u'\xc4'     #  0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
    u'\xc5'     #  0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
    u'\xc9'     #  0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
    u'\xe6'     #  0x0091 -> LATIN SMALL LIGATURE AE
    u'\xc6'     #  0x0092 -> LATIN CAPITAL LIGATURE AE
    u'\xf4'     #  0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
    u'\xf6'     #  0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
    u'\xf2'     #  0x0095 -> LATIN SMALL LETTER O WITH GRAVE
    u'\xfb'     #  0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
    u'\xf9'     #  0x0097 -> LATIN SMALL LETTER U WITH GRAVE
    u'\xff'     #  0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS
    u'\xd6'     #  0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
    u'\xdc'     #  0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
    u'\xf8'     #  0x009b -> LATIN SMALL LETTER O WITH STROKE
    u'\xa3'     #  0x009c -> POUND SIGN
    u'\xd8'     #  0x009d -> LATIN CAPITAL LETTER O WITH STROKE
    u'\xd7'     #  0x009e -> MULTIPLICATION SIGN
    u'\u0192'   #  0x009f -> LATIN SMALL LETTER F WITH HOOK
    u'\xe1'     #  0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
    u'\xed'     #  0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
    u'\xf3'     #  0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
    u'\xfa'     #  0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
    u'\xf1'     #  0x00a4 -> LATIN SMALL LETTER N WITH TILDE
    u'\xd1'     #  0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
    u'\xaa'     #  0x00a6 -> FEMININE ORDINAL INDICATOR
    u'\xba'     #  0x00a7 -> MASCULINE ORDINAL INDICATOR
    u'\xbf'     #  0x00a8 -> INVERTED QUESTION MARK
    u'\xae'     #  0x00a9 -> REGISTERED SIGN
    u'\xac'     #  0x00aa -> NOT SIGN
    u'\xbd'     #  0x00ab -> VULGAR FRACTION ONE HALF
    u'\xbc'     #  0x00ac -> VULGAR FRACTION ONE QUARTER
    u'\xa1'     #  0x00ad -> INVERTED EXCLAMATION MARK
    u'\xab'     #  0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    u'\xbb'     #  0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    u'\u2591'   #  0x00b0 -> LIGHT SHADE
    u'\u2592'   #  0x00b1 -> MEDIUM SHADE
    u'\u2593'   #  0x00b2 -> DARK SHADE
    u'\u2502'   #  0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
    u'\u2524'   #  0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
    u'\xc1'     #  0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
    u'\xc2'     #  0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
    u'\xc0'     #  0x00b7 -> LATIN CAPITAL LETTER A WITH GRAVE
    u'\xa9'     #  0x00b8 -> COPYRIGHT SIGN
    u'\u2563'   #  0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    u'\u2551'   #  0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
    u'\u2557'   #  0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
    u'\u255d'   #  0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
    u'\xa2'     #  0x00bd -> CENT SIGN
    u'\xa5'     #  0x00be -> YEN SIGN
    u'\u2510'   #  0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
    u'\u2514'   #  0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
    u'\u2534'   #  0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
    u'\u252c'   #  0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    u'\u251c'   #  0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    u'\u2500'   #  0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
    u'\u253c'   #  0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    u'\xe3'     #  0x00c6 -> LATIN SMALL LETTER A WITH TILDE
    u'\xc3'     #  0x00c7 -> LATIN CAPITAL LETTER A WITH TILDE
    u'\u255a'   #  0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
    u'\u2554'   #  0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
    u'\u2569'   #  0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    u'\u2566'   #  0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    u'\u2560'   #  0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    u'\u2550'   #  0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
    u'\u256c'   #  0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    u'\xa4'     #  0x00cf -> CURRENCY SIGN
    u'\xf0'     #  0x00d0 -> LATIN SMALL LETTER ETH
    u'\xd0'     #  0x00d1 -> LATIN CAPITAL LETTER ETH
    u'\xca'     #  0x00d2 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
    u'\xcb'     #  0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
    u'\xc8'     #  0x00d4 -> LATIN CAPITAL LETTER E WITH GRAVE
    u'\u0131'   #  0x00d5 -> LATIN SMALL LETTER DOTLESS I
    u'\xcd'     #  0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
    u'\xce'     #  0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
    u'\xcf'     #  0x00d8 -> LATIN CAPITAL LETTER I WITH DIAERESIS
    u'\u2518'   #  0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
    u'\u250c'   #  0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
    u'\u2588'   #  0x00db -> FULL BLOCK
    u'\u2584'   #  0x00dc -> LOWER HALF BLOCK
    u'\xa6'     #  0x00dd -> BROKEN BAR
    u'\xcc'     #  0x00de -> LATIN CAPITAL LETTER I WITH GRAVE
    u'\u2580'   #  0x00df -> UPPER HALF BLOCK
    u'\xd3'     #  0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
    u'\xdf'     #  0x00e1 -> LATIN SMALL LETTER SHARP S
    u'\xd4'     #  0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
    u'\xd2'     #  0x00e3 -> LATIN CAPITAL LETTER O WITH GRAVE
    u'\xf5'     #  0x00e4 -> LATIN SMALL LETTER O WITH TILDE
    u'\xd5'     #  0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE
    u'\xb5'     #  0x00e6 -> MICRO SIGN
    u'\xfe'     #  0x00e7 -> LATIN SMALL LETTER THORN
    u'\xde'     #  0x00e8 -> LATIN CAPITAL LETTER THORN
    u'\xda'     #  0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
    u'\xdb'     #  0x00ea -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
    u'\xd9'     #  0x00eb -> LATIN CAPITAL LETTER U WITH GRAVE
    u'\xfd'     #  0x00ec -> LATIN SMALL LETTER Y WITH ACUTE
    u'\xdd'     #  0x00ed -> LATIN CAPITAL LETTER Y WITH ACUTE
    u'\xaf'     #  0x00ee -> MACRON
    u'\xb4'     #  0x00ef -> ACUTE ACCENT
    u'\xad'     #  0x00f0 -> SOFT HYPHEN
    u'\xb1'     #  0x00f1 -> PLUS-MINUS SIGN
    u'\u2017'   #  0x00f2 -> DOUBLE LOW LINE
    u'\xbe'     #  0x00f3 -> VULGAR FRACTION THREE QUARTERS
    u'\xb6'     #  0x00f4 -> PILCROW SIGN
    u'\xa7'     #  0x00f5 -> SECTION SIGN
    u'\xf7'     #  0x00f6 -> DIVISION SIGN
    u'\xb8'     #  0x00f7 -> CEDILLA
    u'\xb0'     #  0x00f8 -> DEGREE SIGN
    u'\xa8'     #  0x00f9 -> DIAERESIS
    u'\xb7'     #  0x00fa -> MIDDLE DOT
    u'\xb9'     #  0x00fb -> SUPERSCRIPT ONE
    u'\xb3'     #  0x00fc -> SUPERSCRIPT THREE
    u'\xb2'     #  0x00fd -> SUPERSCRIPT TWO
    u'\u25a0'   #  0x00fe -> BLACK SQUARE
    u'\xa0'     #  0x00ff -> NO-BREAK SPACE
)

### Decoding Map
# byte value -> Unicode code point; the table above in dict form.
decoding_map = dict((position, ord(char))
                    for position, char in enumerate(decoding_table))

### Encoding Map
# Unicode code point -> byte value.  CP850 decoding is injective (all
# 256 table entries are distinct), so inverting the decoding map
# reproduces the gencodec-generated literal exactly.
encoding_map = codecs.make_encoding_map(decoding_map)
| gpl-3.0 |
willhighland/azure-quickstart-templates | splunk-on-ubuntu/scripts/dobackup.py | 119 | 3256 | #!/usr/bin/env python3
# The MIT License (MIT)
#
# Copyright (c) 2016 Microsoft. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Back up a tar-ball to Azure blob storage.
Usage:
python dobackup.py <path/file.tar>
"""
from azure.storage import CloudStorageAccount
import config, time, argparse
def _get_parameters():
parser = argparse.ArgumentParser()
parser.add_argument("input_file", help="path+name of tar-ball to send to blob storage")
args = parser.parse_args()
input_file = args.input_file
return input_file
def _get_service():
    """Build a block-blob service client from the credentials in config.py."""
    # Storage credentials are injected at deployment time via the config
    # module; the CloudStorageAccount wraps them into a service factory.
    account = CloudStorageAccount(
        account_name=config.STORAGE_ACCOUNT_NAME,
        account_key=config.STORAGE_ACCOUNT_KEY)
    return account.create_block_blob_service()
# The last time a backup was dropped into the folder, it was named 'splunketccfg.tar'.
# This time, I rename that file to have a datetime stamp on the end of it.
# And then I copy the new backup to 'splunketccfg.tar'.
# This way, the newest backup is always 'splunketccfg.tar'. Easier to find when it's time to restore.
# The edge case is the first time backup is run. So I check for existence before trying to copy.
def _store_tarball(service, input_file):
trg_container_name = 'backups'
stacked_blob_name = 'splunketccfg_' + time.strftime('%m%d%YT%H%M%S') + '.tar'
newest_blob_name = 'splunketccfg.tar'
exists = service.exists(trg_container_name, newest_blob_name)
if exists:
source = service.make_blob_url(trg_container_name, newest_blob_name)
service.copy_blob(trg_container_name, stacked_blob_name, source)
service.create_blob_from_path(trg_container_name, newest_blob_name, input_file)
def main():
    """Entry point: parse the command line and push the tar-ball to blob storage."""
    tarball = _get_parameters()
    _store_tarball(_get_service(), tarball)


if __name__ == '__main__':
    main()
| mit |
alhashash/account-financial-tools | account_auto_fy_sequence/models/__init__.py | 38 | 1157 | # coding=utf-8
##############################################################################
#
# account_auto_fy_sequence module for Odoo
# Copyright (C) 2014 ACSONE SA/NV (<http://acsone.eu>)
# @author Stéphane Bidoul <stephane.bidoul@acsone.eu>
#
# account_auto_fy_sequence is free software:
# you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License v3 or later
# as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# account_auto_fy_sequence is distributed
# in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License v3 or later for more details.
#
# You should have received a copy of the GNU Affero General Public License
# v3 or later along with this program.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import ir_sequence
from . import account_journal
| agpl-3.0 |
nhicher/ansible | lib/ansible/modules/network/avi/avi_sslprofile.py | 20 | 8497 | #!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Metadata consumed by Ansible's plugin machinery: metadata schema 1.1,
# module in "preview" status and supported by the community (not Core).
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_sslprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of SSLProfile Avi RESTful Object
description:
- This module is used to configure SSLProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
accepted_ciphers:
description:
- Ciphers suites represented as defined by U(http://www.openssl.org/docs/apps/ciphers.html).
- Default value when not specified in API or module is interpreted by Avi Controller as AES:3DES:RC4.
accepted_versions:
description:
- Set of versions accepted by the server.
cipher_enums:
description:
- Enum options - tls_ecdhe_ecdsa_with_aes_128_gcm_sha256, tls_ecdhe_ecdsa_with_aes_256_gcm_sha384, tls_ecdhe_rsa_with_aes_128_gcm_sha256,
- tls_ecdhe_rsa_with_aes_256_gcm_sha384, tls_ecdhe_ecdsa_with_aes_128_cbc_sha256, tls_ecdhe_ecdsa_with_aes_256_cbc_sha384,
- tls_ecdhe_rsa_with_aes_128_cbc_sha256, tls_ecdhe_rsa_with_aes_256_cbc_sha384, tls_rsa_with_aes_128_gcm_sha256, tls_rsa_with_aes_256_gcm_sha384,
- tls_rsa_with_aes_128_cbc_sha256, tls_rsa_with_aes_256_cbc_sha256, tls_ecdhe_ecdsa_with_aes_128_cbc_sha, tls_ecdhe_ecdsa_with_aes_256_cbc_sha,
- tls_ecdhe_rsa_with_aes_128_cbc_sha, tls_ecdhe_rsa_with_aes_256_cbc_sha, tls_rsa_with_aes_128_cbc_sha, tls_rsa_with_aes_256_cbc_sha,
- tls_rsa_with_3des_ede_cbc_sha, tls_rsa_with_rc4_128_sha.
description:
description:
- User defined description for the object.
dhparam:
description:
- Dh parameters used in ssl.
- At this time, it is not configurable and is set to 2048 bits.
enable_ssl_session_reuse:
description:
- Enable ssl session re-use.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
name:
description:
- Name of the object.
required: true
prefer_client_cipher_ordering:
description:
- Prefer the ssl cipher ordering presented by the client during the ssl handshake over the one specified in the ssl profile.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
send_close_notify:
description:
- Send 'close notify' alert message for a clean shutdown of the ssl connection.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
ssl_rating:
description:
- Sslrating settings for sslprofile.
ssl_session_timeout:
description:
- The amount of time before an ssl session expires.
- Default value when not specified in API or module is interpreted by Avi Controller as 86400.
- Units(SEC).
tags:
description:
- List of tag.
tenant_ref:
description:
- It is a reference to an object of type tenant.
type:
description:
- Ssl profile type.
- Enum options - SSL_PROFILE_TYPE_APPLICATION, SSL_PROFILE_TYPE_SYSTEM.
- Field introduced in 17.2.8.
- Default value when not specified in API or module is interpreted by Avi Controller as SSL_PROFILE_TYPE_APPLICATION.
version_added: "2.6"
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create SSL profile with list of allowed ciphers
avi_sslprofile:
controller: '{{ controller }}'
username: '{{ username }}'
password: '{{ password }}'
accepted_ciphers: >
ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA:
ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-ECDSA-AES256-SHA384:
AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:
AES256-SHA:DES-CBC3-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:
ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA
accepted_versions:
- type: SSL_VERSION_TLS1
- type: SSL_VERSION_TLS1_1
- type: SSL_VERSION_TLS1_2
cipher_enums:
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384
- TLS_RSA_WITH_AES_128_GCM_SHA256
- TLS_RSA_WITH_AES_256_GCM_SHA384
- TLS_RSA_WITH_AES_128_CBC_SHA256
- TLS_RSA_WITH_AES_256_CBC_SHA256
- TLS_RSA_WITH_AES_128_CBC_SHA
- TLS_RSA_WITH_AES_256_CBC_SHA
- TLS_RSA_WITH_3DES_EDE_CBC_SHA
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA
name: PFS-BOTH-RSA-EC
send_close_notify: true
ssl_rating:
compatibility_rating: SSL_SCORE_EXCELLENT
performance_rating: SSL_SCORE_EXCELLENT
security_score: '100.0'
tenant_ref: Demo
"""
RETURN = '''
obj:
description: SSLProfile (api/sslprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Ansible entry point for managing Avi SSLProfile objects.

    Builds the module argument spec, verifies the Avi SDK is importable,
    and delegates the actual create/update/delete work to avi_ansible_api.
    """
    # The spec mirrors the options documented in the DOCUMENTATION block
    # above; 'state' and the avi_api_* options control how the change is
    # applied on the controller.
    argument_specs = dict(
        state=dict(default='present',
                   choices=['absent', 'present']),
        avi_api_update_method=dict(default='put',
                                   choices=['put', 'patch']),
        avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
        accepted_ciphers=dict(type='str',),
        accepted_versions=dict(type='list',),
        cipher_enums=dict(type='list',),
        description=dict(type='str',),
        dhparam=dict(type='str',),
        enable_ssl_session_reuse=dict(type='bool',),
        name=dict(type='str', required=True),
        prefer_client_cipher_ordering=dict(type='bool',),
        send_close_notify=dict(type='bool',),
        ssl_rating=dict(type='dict',),
        ssl_session_timeout=dict(type='int',),
        tags=dict(type='list',),
        tenant_ref=dict(type='str',),
        type=dict(type='str',),
        url=dict(type='str',),
        uuid=dict(type='str',),
    )
    # Shared controller-connection options (controller/username/password/...)
    # common to every Avi module.
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    # HAS_AVI is set by the guarded import at the top of this file; fail
    # cleanly through Ansible rather than with an ImportError traceback.
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    return avi_ansible_api(module, 'sslprofile',
                           set([]))


if __name__ == '__main__':
    main()
| gpl-3.0 |
python-diamond/Diamond | src/diamond/test/testcollector.py | 9 | 3608 | #!/usr/bin/python
# coding=utf-8
##########################################################################
from mock import patch
from test import unittest
import configobj
from diamond.collector import Collector
class BaseCollectorTest(unittest.TestCase):
    """Tests for the base Collector: hostname resolution and metric paths."""

    def _hostname_config(self, **options):
        # Minimal config with a 'server' section plus the supplied options
        # in the default collector section.
        config = configobj.ConfigObj()
        config['server'] = {}
        config['server']['collectors_config_path'] = ''
        config['collectors'] = {}
        config['collectors']['default'] = dict(options)
        return config

    def _path_config(self, path_prefix, path):
        # Config carrying only the two options read by get_metric_path().
        config = configobj.ConfigObj()
        config['collectors'] = {}
        config['collectors']['default'] = {
            'path_prefix': path_prefix,
            'path': path,
        }
        return config

    def test_SetCustomHostname(self):
        # A literal 'hostname' option overrides hostname detection.
        config = self._hostname_config(hostname='custom.localhost')
        collector = Collector(config, [])
        self.assertEquals('custom.localhost', collector.get_hostname())

    def test_SetHostnameViaShellCmd(self):
        # With hostname_method=shell, the option is executed as a command
        # and its output becomes the hostname.
        config = self._hostname_config(hostname='echo custom.localhost',
                                       hostname_method='shell')
        collector = Collector(config, [])
        self.assertEquals('custom.localhost', collector.get_hostname())

    @patch('diamond.collector.get_hostname')
    def test_get_metric_path_no_prefix(self, get_hostname_mock):
        get_hostname_mock.return_value = None
        config = self._path_config(path_prefix='', path='bar')
        self.assertEqual('bar.foo',
                         Collector(config, []).get_metric_path('foo'))

    @patch('diamond.collector.get_hostname')
    def test_get_metric_path_no_prefix_no_path(self, get_hostname_mock):
        get_hostname_mock.return_value = None
        config = self._path_config(path_prefix='', path='')
        self.assertEqual('foo',
                         Collector(config, []).get_metric_path('foo'))

    @patch('diamond.collector.get_hostname')
    def test_get_metric_path_no_path(self, get_hostname_mock):
        get_hostname_mock.return_value = None
        config = self._path_config(path_prefix='bar', path='')
        self.assertEqual('bar.foo',
                         Collector(config, []).get_metric_path('foo'))

    @patch('diamond.collector.get_hostname')
    def test_get_metric_path_dot_path(self, get_hostname_mock):
        # A path of '.' collapses: only prefix and metric name remain.
        get_hostname_mock.return_value = None
        config = self._path_config(path_prefix='bar', path='.')
        self.assertEqual('bar.foo',
                         Collector(config, []).get_metric_path('foo'))

    @patch('diamond.collector.get_hostname')
    def test_get_metric_path(self, get_hostname_mock):
        # Full layout: <path_prefix>.<hostname>.<path>.<metric>.
        get_hostname_mock.return_value = 'bar'
        config = self._path_config(path_prefix='poof', path='xyz')
        self.assertEqual('poof.bar.xyz.foo',
                         Collector(config, []).get_metric_path('foo'))
| mit |
rtrajano/sphinx_rtfd | docs/conf.py | 1 | 8478 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this docs directory.
# NOTE(review): this assumes Sphinx is invoked with the docs directory as
# the working directory, so os.getcwd() is docs/ and its parent is the
# repository root -- confirm against the build setup.
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used (sphinx_rtfd.__version__ feeds `version`/`release` below).
sys.path.insert(0, project_root)
import sphinx_rtfd
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Sphinx and RTFD Demo'
copyright = u'2014, Rigel Trajano'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = sphinx_rtfd.__version__
# The full version, including alpha/beta/rc tags.
release = sphinx_rtfd.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sphinx_rtfddoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'sphinx_rtfd.tex',
u'Sphinx and RTFD Demo Documentation',
u'Rigel Trajano', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sphinx_rtfd',
u'Sphinx and RTFD Demo Documentation',
[u'Rigel Trajano'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'sphinx_rtfd',
u'Sphinx and RTFD Demo Documentation',
u'Rigel Trajano',
'sphinx_rtfd',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| bsd-3-clause |
meteorcloudy/tensorflow | tensorflow/contrib/kafka/python/ops/kafka_dataset_ops.py | 19 | 2777 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Kafka Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.kafka.python.ops import kafka_op_loader # pylint: disable=unused-import
from tensorflow.contrib.kafka.python.ops import gen_dataset_ops
from tensorflow.python.data.ops.dataset_ops import Dataset
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
class KafkaDataset(Dataset):
  """A `Dataset` whose elements are messages consumed from Kafka topics."""

  def __init__(self,
               topics,
               servers="localhost",
               group="",
               eof=False,
               timeout=1000):
    """Create a KafkaReader.

    Args:
      topics: A `tf.string` tensor containing one or more subscriptions,
        in the format of [topic:partition:offset:length],
        by default length is -1 for unlimited.
      servers: A list of bootstrap servers.
      group: The consumer group id.
      eof: If True, the kafka reader will stop on EOF.
      timeout: The timeout value for the Kafka Consumer to wait
        (in millisecond).
    """
    super(KafkaDataset, self).__init__()
    # Normalize every argument into a tensor once, up front; the underlying
    # dataset op consumes tensors, not Python values.
    self._topics = ops.convert_to_tensor(topics, dtypes.string, name="topics")
    self._servers = ops.convert_to_tensor(
        servers, dtypes.string, name="servers")
    self._group = ops.convert_to_tensor(group, dtypes.string, name="group")
    self._eof = ops.convert_to_tensor(eof, dtypes.bool, name="eof")
    self._timeout = ops.convert_to_tensor(
        timeout, dtypes.int64, name="timeout")

  def _as_variant_tensor(self):
    # Hand all configuration tensors to the generated kafka_dataset op.
    return gen_dataset_ops.kafka_dataset(self._topics, self._servers,
                                         self._group, self._eof, self._timeout)

  @property
  def output_classes(self):
    # Each element is a plain tensor (no sparse/nested structure).
    return ops.Tensor

  @property
  def output_shapes(self):
    # Messages are produced one at a time as scalar strings.
    return tensor_shape.scalar()

  @property
  def output_types(self):
    return dtypes.string
| apache-2.0 |
xtmhm2000/scrapy-0.22 | scrapy/tests/test_contracts.py | 7 | 4126 | from unittest import TextTestRunner
from twisted.trial import unittest
from scrapy.spider import Spider
from scrapy.http import Request
from scrapy.item import Item, Field
from scrapy.contracts import ContractsManager
from scrapy.contracts.default import (
UrlContract,
ReturnsContract,
ScrapesContract,
)
class TestItem(Item):
    # Minimal scrapy Item used as the scraped-output type in the spider
    # fixture; `name` and `url` are the fields the @scrapes contracts check.
    name = Field()
    url = Field()
class ResponseMock(object):
    # Stand-in for a scrapy Response: the callbacks under test only read
    # `response.url`.
    url = 'http://scrapy.org'
class TestSpider(Spider):
    # NOTE: the method docstrings below are load-bearing, not documentation.
    # ContractsManager parses their @url / @returns / @scrapes annotations
    # to build test requests, so their exact text must not be edited.
    name = 'demo_spider'

    def returns_request(self, response):
        """ method which returns request
        @url http://scrapy.org
        @returns requests 1
        """
        return Request('http://scrapy.org', callback=self.returns_item)

    def returns_item(self, response):
        """ method which returns item
        @url http://scrapy.org
        @returns items 1 1
        """
        return TestItem(url=response.url)

    def returns_fail(self, response):
        """ method which returns item
        @url http://scrapy.org
        @returns items 0 0
        """
        # Declares "0 items" but returns one: exercises a failing
        # ReturnsContract.
        return TestItem(url=response.url)

    def scrapes_item_ok(self, response):
        """ returns item with name and url
        @url http://scrapy.org
        @returns items 1 1
        @scrapes name url
        """
        return TestItem(name='test', url=response.url)

    def scrapes_item_fail(self, response):
        """ returns item with no name
        @url http://scrapy.org
        @returns items 1 1
        @scrapes name url
        """
        # 'name' is missing from the item: exercises a failing
        # ScrapesContract.
        return TestItem(url=response.url)

    def parse_no_url(self, response):
        """ method with no url
        @returns items 1 1
        """
        # No @url annotation: ContractsManager cannot build a request for it.
        pass
class ContractsManagerTest(unittest.TestCase):
    """Exercises ContractsManager against the TestSpider fixture above."""

    contracts = [UrlContract, ReturnsContract, ScrapesContract]

    def setUp(self):
        self.conman = ContractsManager(self.contracts)
        self.results = TextTestRunner()._makeResult()
        self.results.stream = None

    def _check_results(self, expect_failures):
        # Contract outcomes accumulate in self.results; errors are never
        # expected, failures only when the fixture is built to fail.
        if expect_failures:
            self.assertTrue(self.results.failures)
        else:
            self.assertFalse(self.results.failures)
        self.assertFalse(self.results.errors)

    def should_succeed(self):
        self._check_results(expect_failures=False)

    def should_fail(self):
        self._check_results(expect_failures=True)

    def test_contracts(self):
        spider = TestSpider()

        # Both docstring annotations of returns_request are extracted.
        extracted = self.conman.extract_contracts(spider.returns_request)
        self.assertEqual(len(extracted), 2)
        self.assertEqual(frozenset(type(c) for c in extracted),
                         frozenset([UrlContract, ReturnsContract]))

        # A @url contract is required to build a request for a method.
        self.assertNotEqual(
            self.conman.from_method(spider.returns_request, self.results),
            None)
        self.assertEqual(
            self.conman.from_method(spider.parse_no_url, self.results), None)

    def test_returns(self):
        spider = TestSpider()
        response = ResponseMock()

        def run_callback(method):
            # Build the contract-wrapped request and feed it the mocked
            # response; return the types of whatever the callback yields.
            request = self.conman.from_method(method, self.results)
            return [type(x) for x in request.callback(response)]

        self.assertEqual(run_callback(spider.returns_item), [TestItem])
        self.should_succeed()

        self.assertEqual(run_callback(spider.returns_request), [Request])
        self.should_succeed()

        run_callback(spider.returns_fail)
        self.should_fail()

    def test_scrapes(self):
        spider = TestSpider()
        response = ResponseMock()

        request = self.conman.from_method(spider.scrapes_item_ok,
                                          self.results)
        produced = [type(x) for x in request.callback(response)]
        self.assertEqual(produced, [TestItem])
        self.should_succeed()

        request = self.conman.from_method(spider.scrapes_item_fail,
                                          self.results)
        request.callback(response)
        self.should_fail()
| bsd-3-clause |
AndreasMadsen/tensorflow | tensorflow/contrib/labeled_tensor/python/ops/core_test.py | 11 | 30549 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import operator
import re
import textwrap
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.contrib.labeled_tensor.python.ops import test_util
class AxisTest(tf.test.TestCase):
  # Unit tests for core.Axis: a named dimension that carries either a size
  # (tf.Dimension), explicit tick labels, or None for "unknown".

  def setUp(self):
    d_7 = tf.Dimension(7)
    p_rgb = ['red', 'green', 'blue']
    self.i_7 = core.Axis('7', d_7)
    self.i_7p = core.Axis('7prime', d_7)
    self.i_rgb = core.Axis('rgb', p_rgb)
    self.i_range = core.Axis('range', range(7))
    self.i_unknown = core.Axis('unknown', None)

  def test_equality(self):
    # Each axis equals only itself: both name and value participate.
    axes = [self.i_7, self.i_7p, self.i_rgb, self.i_range, self.i_unknown]
    for i, axis_0 in enumerate(axes):
      for j, axis_1 in enumerate(axes):
        if i == j:
          self.assertEqual(axis_0, axis_1)
        else:
          self.assertNotEqual(axis_0, axis_1)

  def test_axis_value(self):
    # Labels given as an iterable are exposed as a tuple.
    self.assertEqual(self.i_7.value, tf.Dimension(7))
    self.assertTrue(self.i_range.value == tuple(range(7)))

  def test_axis_input(self):
    # An Axis can be rebuilt from its own (name, value) pair.
    axes = [self.i_7, self.i_7p, self.i_rgb, self.i_range, self.i_unknown]
    for axis in axes:
      self.assertEqual(axis, core.Axis(axis.name, axis.value))

  def test_axis_value_input(self):
    # range, list and ndarray label inputs all produce equal axes.
    axis = self.i_range
    for value in [range(7), list(range(7)), np.arange(7)]:
      self.assertEqual(axis, core.Axis(axis.name, value))

  def test_size(self):
    # len() works for sized axes; .size is None when unknown.
    self.assertEqual(len(self.i_7), 7)
    self.assertEqual(len(self.i_rgb), 3)
    self.assertEqual(len(self.i_range), 7)
    self.assertEqual(self.i_unknown.size, None)

  def test_concat_single(self):
    red = core.Axis('rgb', ['red'])
    self.assertEqual(core.concat_axes([red]), red)

  def test_concat_many(self):
    # Concatenation merges the label sequences in order.
    red = core.Axis('rgb', ['red'])
    green = core.Axis('rgb', ['green'])
    blue = core.Axis('rgb', ['blue'])
    red_green_blue = core.Axis('rgb', ['red', 'green', 'blue'])
    self.assertEqual(core.concat_axes([red, green, blue]), red_green_blue)

  def test_concat_different_names(self):
    # Axes with different names cannot be concatenated.
    red = core.Axis('red', ['red'])
    green = core.Axis('green', ['red'])
    with self.assertRaises(ValueError):
      core.concat_axes([red, green])

  def test_concat_unknown(self):
    # Concatenating unknown-size axes yields an unknown-size axis.
    red = core.Axis('rgb', None)
    green = core.Axis('rgb', None)
    self.assertEqual(core.concat_axes([red, green]), red)

  def test_repr(self):
    self.assertEqual("Axis('7', Dimension(7))", repr(self.i_7))

  def test_invalid_input(self):
    # Unhashable labels, duplicate labels, and non-Axis concat arguments
    # are all rejected with distinct exception types.
    with self.assertRaises(TypeError):
      core.Axis('foo', [{}])
    with self.assertRaises(ValueError):
      core.Axis('foo', [1, 2, 3, 1])
    red = core.Axis('foo', ['red'])
    with self.assertRaises(tc.Error):
      core.concat_axes([red, 1])

  def test_as_axis(self):
    # as_axis accepts both a (name, value) tuple and an existing Axis.
    self.assertEqual(self.i_7, core.as_axis(('7', 7)))
    self.assertEqual(self.i_7, core.as_axis(self.i_7))
class AxesTest(tf.test.TestCase):
  # Unit tests for core.Axes: an ordered mapping of axis name -> Axis.

  def setUp(self):
    d_7 = tf.Dimension(7)
    d_8 = tf.Dimension(8)
    p_rgb = ['red', 'green', 'blue']
    p_range = range(7)
    self.i_8 = core.Axis('8', d_8)
    self.a0 = core.Axes([('d7', d_7)])
    self.a1 = core.Axes([('d7', d_7)])
    self.a2 = core.Axes([('d7', d_7), ('rgb', p_rgb)])
    self.a3 = core.Axes([('8', d_8), ('range', p_range)])

  def test_equality(self):
    # Equality is structural: same construction arguments compare equal.
    self.assertEqual(self.a0, self.a0)
    self.assertEqual(self.a0, self.a1)
    self.assertNotEqual(self.a0, self.a2)

  def test_repr(self):
    self.assertEqual("Axes([('d7', Dimension(7))])", repr(self.a0))

  def test_remove(self):
    # remove() returns a new Axes without the named axis; unknown names
    # raise KeyError.
    a = self.a3.remove('range')
    self.assertEqual(a, core.Axes([self.i_8]))
    with self.assertRaises(KeyError):
      self.a3.remove('foobar')

  def test_typecheck_error_message(self):
    # The typecheck error spells out the full allowed input type; '...'
    # in the expected pattern is a wildcard for the axis-name part.
    pattern = ('List(Union(labeled_tensor.Axis, Tuple(..., '
               'Union(Union(numpy.ndarray, %s, list, tuple), '
               'Optional(Union(tensorflow.Dimension, int))))))' %
               range.__name__)
    regexp = re.escape(pattern).replace(re.escape('...'), '.*')
    with self.assertRaisesRegexp(tc.Error, 'allowed type ' + regexp):
      core.Axes(None)
class LabeledTensorTest(test_util.Base):
  """Tests for core.LabeledTensor construction, indexing, and protocols."""

  def setUp(self):
    # A 4-D ones tensor labeled with the four supported axis spellings:
    # labeled range, string labels, bare int size, and a tf.Dimension.
    tensor = tf.ones([7, 3, 8, 1])
    a0 = ('x', range(7))
    a1 = ('channel', ['red', 'green', 'blue'])
    a2 = ('y', 8)
    a3 = ('z', tf.Dimension(1))
    self.lt = core.LabeledTensor(tensor, [a0, a1, a2, a3])

  def test_repr(self):
    # '...' below is a wildcard; NOTE(review): internal alignment of the
    # original pattern may have differed -- the assertion is regex-based.
    pattern = textwrap.dedent("""\
        <LabeledTensor '...' shape=(7, 3, 8, 1) dtype=float32
        axes=[('x', ...),
        ('channel', ...),
        ('y', Dimension(8)),
        ('z', Dimension(1))]>""")
    regexp = re.escape(pattern).replace(re.escape('...'), '.*')
    self.assertRegexpMatches(repr(self.lt), regexp)

  def test_reuse_existing_axes(self):
    alt_lt = core.LabeledTensor(self.lt.tensor, self.lt.axes)
    self.assertLabeledTensorsEqual(alt_lt, self.lt)

  def test_reuse_existing_axis_objects(self):
    alt_lt = core.LabeledTensor(self.lt.tensor, self.lt.axes.values())
    self.assertLabeledTensorsEqual(alt_lt, self.lt)

  def test_indexing_scalars(self):
    # Scalar indices drop the corresponding axes from the result.
    actual = self.lt[:, :, :, 0]
    expected = core.LabeledTensor(self.lt.tensor[:, :, :, 0],
                                  list(self.lt.axes.values())[:-1])
    self.assertLabeledTensorsEqual(actual, expected)
    actual = self.lt[1, :, :, 0]
    expected = core.LabeledTensor(self.lt.tensor[1, :, :, 0],
                                  list(self.lt.axes.values())[1:-1])
    self.assertLabeledTensorsEqual(actual, expected)
    actual = self.lt[1, 2, :, 0]
    expected = core.LabeledTensor(self.lt.tensor[1, 2, :, 0],
                                  list(self.lt.axes.values())[2:-1])
    self.assertLabeledTensorsEqual(actual, expected)

  def test_indexing_1d(self):
    # Fully indexing a 1-D labeled tensor yields a scalar (no axes).
    lt_1d = self.lt[1, 2, :, 0]
    actual = lt_1d[3]
    expected = core.LabeledTensor(lt_1d.tensor[3], [])
    self.assertLabeledTensorsEqual(actual, expected)

  def test_indexing_slices(self):
    # Slicing keeps the axis but shrinks its labels accordingly.
    actual = self.lt[:3, :, :, :]
    axes = [('x', range(3))] + list(self.lt.axes.values())[1:]
    expected = core.LabeledTensor(self.lt.tensor[:3, :, :, :], axes)
    self.assertLabeledTensorsEqual(actual, expected)

  def test_invalid_indexing(self):
    # Too few or too many indices are rejected.
    with self.assertRaises(ValueError):
      self.lt[0]  # pylint: disable=pointless-statement
    with self.assertRaises(ValueError):
      self.lt[:, :, :, :, 0]  # pylint: disable=pointless-statement

  def test_unknown_size(self):
    # A placeholder with unknown leading size yields an axis with size None.
    tensor = tf.placeholder(tf.string, [None])
    actual = core.LabeledTensor(tensor, ['x'])
    self.assertIsNone(actual.axes['x'].size)
    self.assertIs(actual.axes['x'].value, tensor.get_shape()[0])

  def test_eq(self):
    # A LabeledTensor never compares equal to its bare tensor (either order).
    self.assertEqual(self.lt, self.lt)
    self.assertNotEqual(self.lt, self.lt.tensor)
    self.assertNotEqual(self.lt.tensor, self.lt)

  def test_hash(self):
    lt1 = self.lt
    lt2 = core.LabeledTensor(self.lt.tensor, self.lt.axes)
    self.assertEqual(lt1, lt2)
    self.assertEqual(hash(lt1), hash(lt2))

  def test_name(self):
    self.assertEqual(self.lt.name, self.lt.tensor.name)

  def test_dtype(self):
    self.assertEqual(self.lt.dtype, self.lt.tensor.dtype)

  def test_get_shape(self):
    self.assertEqual(self.lt.get_shape(), self.lt.tensor.get_shape())

  def test_convert_to_tensor(self):
    # tf.convert_to_tensor unwraps to the identical underlying tensor object.
    expected = self.lt.tensor
    actual = tf.convert_to_tensor(self.lt)
    self.assertIs(expected, actual)
class Base(test_util.Base):
  """Shared fixture: a 4-D labeled tensor plus two 2-D slices of it.

  Axes are x (labeled range), channel (string labels), z (unlabeled), and
  probs (float labels). The tensor holds the values 0..N-1 reshaped to 4-D,
  so every element is distinct.
  """

  def setUp(self):
    self.x_size = 7
    self.channel_size = 3
    self.z_size = 4
    self.probs_size = 11

    tensor = tf.range(0, self.x_size * self.channel_size * self.z_size *
                      self.probs_size)
    tensor = tf.reshape(tensor, [self.x_size, self.channel_size, self.z_size,
                                 self.probs_size])
    a0 = ('x', range(self.x_size))
    a1 = ('channel', ['red', 'green', 'blue'])
    a2 = 'z'  # axis with a name but no labels
    a3 = ('probs', np.linspace(0.0, 1.0, self.probs_size))

    self.tensor = tensor
    self.a0 = a0
    self.a1 = a1
    self.a2 = a2
    self.a3 = a3
    self.original_lt = core.LabeledTensor(tensor, [a0, a1, a2, a3])

    # 2-D views used by alignment/broadcasting tests below.
    self.x_probs_lt = core.slice_function(self.original_lt, {'z': 0,
                                                             'channel': 0})
    self.channel_probs_lt = core.slice_function(self.original_lt, {'x': 3,
                                                                   'z': 0})
class IdentityTest(Base):
  """Tests for core.identity."""

  def test_name(self):
    """identity() tags the resulting op name with 'lt_identity'."""
    result = core.identity(self.original_lt)
    self.assertIn('lt_identity', result.name)
class SliceFunctionTest(Base):
  """Tests for core.slice_function (axis-name based slicing)."""

  def test_name(self):
    select_lt = core.slice_function(self.original_lt, {'channel': 1})
    self.assertIn('lt_slice', select_lt.name)

  def test_scalar(self):
    # A scalar index drops the sliced axis entirely.
    select_lt = core.slice_function(self.original_lt, {'channel': 1})
    golden_lt = core.LabeledTensor(self.tensor[:, 1, :, :], [self.a0, self.a2,
                                                             self.a3])
    self.assertLabeledTensorsEqual(select_lt, golden_lt)

  def test_slice(self):
    # A slice keeps the axis with correspondingly shortened labels.
    select_lt = core.slice_function(self.original_lt, {'channel': slice(0, 2)})
    a1_sliced = ('channel', ['red', 'green'])
    golden_lt = core.LabeledTensor(self.tensor[:, :2, :, :],
                                   [self.a0, a1_sliced, self.a2, self.a3])
    self.assertLabeledTensorsEqual(select_lt, golden_lt)

  def test_slices(self):
    # Multiple axes can be sliced in a single call.
    select_lt = core.slice_function(self.original_lt, {'x': slice(1, 5),
                                                       'channel': slice(1,
                                                                        None)})
    a0_sliced = ('x', range(1, 5))
    a1_sliced = ('channel', ['green', 'blue'])
    golden_lt = core.LabeledTensor(self.tensor[1:5, 1:, :, :],
                                   [a0_sliced, a1_sliced, self.a2, self.a3])
    self.assertLabeledTensorsEqual(select_lt, golden_lt)

  def test_slice_unlabeled(self):
    # Slicing an unlabeled axis keeps it unlabeled.
    select_lt = core.slice_function(self.original_lt, {'z': slice(1, 3)})
    a2_sliced = 'z'
    golden_lt = core.LabeledTensor(self.tensor[:, :, 1:3, :],
                                   [self.a0, self.a1, a2_sliced, self.a3])
    self.assertLabeledTensorsEqual(select_lt, golden_lt)

  def test_slice_unknown_shape(self):
    # Works even when the retained dimension has unknown static size.
    lt = core.LabeledTensor(tf.placeholder(tf.float32, [None, 1]), ['x', 'y'])
    sliced_lt = core.slice_function(lt, {'y': 0})
    self.assertEqual(list(sliced_lt.axes.values()), [lt.axes['x']])
class TransposeTest(Base):
  """Tests for core.transpose."""

  def test_name(self):
    transpose_lt = core.transpose(self.original_lt,
                                  self.original_lt.axes.keys())
    self.assertIn('lt_transpose', transpose_lt.name)

  def test_identity(self):
    # Transposing into the existing order is a no-op.
    transpose_lt = core.transpose(self.original_lt,
                                  self.original_lt.axes.keys())
    golden_lt = self.original_lt
    self.assertLabeledTensorsEqual(transpose_lt, golden_lt)

  def test(self):
    # Axis names map to tensor permutation [2, 1, 0, 3] here.
    transpose_lt = core.transpose(self.original_lt, ['z', 'channel', 'x',
                                                     'probs'])
    golden_lt = core.LabeledTensor(
        tf.transpose(self.tensor, [2, 1, 0, 3]),
        [self.a2, self.a1, self.a0, self.a3])
    self.assertLabeledTensorsEqual(transpose_lt, golden_lt)

  def test_default_axis_order(self):
    # Without an explicit order, transpose reverses the axes.
    transpose_lt = core.transpose(self.original_lt)
    golden_lt = core.LabeledTensor(
        tf.transpose(self.tensor, [3, 2, 1, 0]),
        list(reversed(list(self.original_lt.axes.values()))))
    self.assertLabeledTensorsEqual(transpose_lt, golden_lt)

  def test_invalid_input(self):
    # Orders that omit an axis or name a nonexistent one are rejected.
    with self.assertRaises(ValueError):
      core.transpose(self.original_lt, ['channel', 'x', 'probs'])
    with self.assertRaises(ValueError):
      core.transpose(self.original_lt, ['z', 'foo', 'x', 'probs'])
class ExpandDimsTest(Base):
  """Tests for core.expand_dims (inserting size-1 axes by name)."""

  def test_name(self):
    expand_lt = core.expand_dims(self.original_lt, self.original_lt.axes.keys())
    self.assertIn('lt_expand', expand_lt.name)

  def test_identity(self):
    # Passing the existing axes unchanged is a no-op.
    expand_lt = core.expand_dims(self.original_lt, self.original_lt.axes.keys())
    golden_lt = self.original_lt
    self.assertLabeledTensorsEqual(expand_lt, golden_lt)

  def test(self):
    # New names ('foo', 'bar', 'grok') become size-1 axes in place.
    expand_lt = core.expand_dims(self.original_lt, ['foo', 'x', 'bar',
                                                    'channel', 'z', 'probs',
                                                    'grok'])
    golden_lt = core.LabeledTensor(
        tf.reshape(self.tensor, [1, self.x_size, 1, self.channel_size,
                                 self.z_size, self.probs_size, 1]),
        ['foo', self.a0, 'bar', self.a1, self.a2, self.a3, 'grok'])
    self.assertLabeledTensorsEqual(expand_lt, golden_lt)

  def test_label(self):
    # A (name, label) tuple creates a labeled size-1 axis.
    expand_lt = core.expand_dims(self.original_lt, ['x',
                                                    'channel',
                                                    ('foo', 'bar'),
                                                    'z',
                                                    'probs',])
    golden_lt = core.LabeledTensor(
        tf.reshape(self.tensor, [self.x_size, self.channel_size, 1, self.z_size,
                                 self.probs_size]),
        [self.a0, self.a1, ('foo', ['bar']), self.a2, self.a3])
    self.assertLabeledTensorsEqual(expand_lt, golden_lt)

  def test_unknown_dimension(self):
    # Works with unknown static sizes; the new axis gets size 1.
    orig_lt = core.LabeledTensor(tf.placeholder(tf.float32, [None]), ['x'])
    expand_lt = core.expand_dims(orig_lt, ['x', 'y'])
    self.assertEqual(expand_lt.axes, core.Axes([('x', None), ('y', 1)]))

  def test_invalid_input(self):
    # Existing axes must appear in their original relative order.
    with self.assertRaises(core.AxisOrderError):
      core.expand_dims(self.original_lt, ['foo', 'not_x', 'bar', 'channel', 'z',
                                          'probs', 'grok'])
    with self.assertRaises(core.AxisOrderError):
      core.expand_dims(self.original_lt, ['foo', 'z', 'bar', 'channel', 'x',
                                          'probs', 'grok'])
class AxisOrderScopeTest(Base):
  """Tests for the thread-local axis_order_scope context manager."""

  def test(self):
    xyz = ['x', 'y', 'z']
    abc = ['a', 'b', 'c']

    # Scopes nest, and an argument-less scope resets the order to None.
    self.assertIsNone(core.get_axis_order())

    with core.axis_order_scope(xyz):
      self.assertEqual(core.get_axis_order(), xyz)

      with core.axis_order_scope():
        self.assertIsNone(core.get_axis_order())

        with core.axis_order_scope(abc):
          self.assertEqual(core.get_axis_order(), abc)

        self.assertIsNone(core.get_axis_order())

      self.assertEqual(core.get_axis_order(), xyz)

    self.assertIsNone(core.get_axis_order())
class CheckAxisOrderTest(Base):
  """Tests for core.check_axis_order."""

  def test_passes(self):
    axis_order = ['w', 'x', 'y', 'z']

    # An exact match or any order-preserving subset of axes passes.
    lt = core.LabeledTensor(tf.ones((1, 1, 1, 1)), axis_order)
    core.check_axis_order(lt, axis_order)

    lt = core.LabeledTensor(tf.ones((1, 1, 1)), axis_order[1:])
    core.check_axis_order(lt, axis_order)

    lt = core.LabeledTensor(tf.ones((1, 1, 1)), axis_order[:-1])
    core.check_axis_order(lt, axis_order)

  def test_invalid(self):
    axis_order = ['w', 'x', 'y', 'z']
    lt = core.LabeledTensor(tf.ones((1, 1, 1, 1)), axis_order)
    # No order given and none in scope, missing axes, or reversed order fail.
    with self.assertRaises(core.AxisOrderError):
      core.check_axis_order(lt)
    with self.assertRaises(core.AxisOrderError):
      core.check_axis_order(lt, axis_order[:-1])
    with self.assertRaises(core.AxisOrderError):
      core.check_axis_order(lt, axis_order[::-1])

  def test_scope(self):
    # The expected order may come from an enclosing axis_order_scope.
    axis_order = ['w', 'x', 'y', 'z']
    lt = core.LabeledTensor(tf.ones((1, 1, 1, 1)), axis_order)
    with core.axis_order_scope(axis_order):
      core.check_axis_order(lt)
class ImposeAxisOrderTest(Base):
  """Tests for core.impose_axis_order (transpose to a canonical order)."""

  def test_identity(self):
    axis_order = ['w', 'x', 'y', 'z']
    lt = core.LabeledTensor(tf.reshape(tf.range(24), (1, 2, 3, 4)), axis_order)
    actual = core.impose_axis_order(lt, axis_order)
    self.assertLabeledTensorsEqual(lt, actual)

    # Also a no-op when the tensor has only a prefix of the ordered axes.
    lt = core.LabeledTensor(tf.reshape(tf.range(6), (1, 2, 3)), axis_order[:3])
    actual = core.impose_axis_order(lt, axis_order)
    self.assertLabeledTensorsEqual(lt, actual)

  def test_reverse(self):
    axis_order = ['w', 'x', 'y', 'z']

    lt = core.LabeledTensor(tf.reshape(tf.range(24), (1, 2, 3, 4)), axis_order)
    actual = core.impose_axis_order(lt, axis_order[::-1])
    expected = core.transpose(lt, axis_order[::-1])
    self.assertLabeledTensorsEqual(expected, actual)

    # Axes absent from the tensor ('z' here) are simply skipped.
    lt = core.LabeledTensor(tf.reshape(tf.range(6), (1, 2, 3)), axis_order[:3])
    actual = core.impose_axis_order(lt, axis_order[::-1])
    expected = core.transpose(lt, ['y', 'x', 'w'])
    self.assertLabeledTensorsEqual(expected, actual)

  def test_scope(self):
    # The target order may come from an enclosing axis_order_scope.
    axis_order = ['w', 'x', 'y', 'z']
    lt = core.LabeledTensor(tf.reshape(tf.range(24), (1, 2, 3, 4)), axis_order)
    expected = core.transpose(lt, axis_order[::-1])
    with core.axis_order_scope(axis_order[::-1]):
      actual = core.impose_axis_order(lt)
    self.assertLabeledTensorsEqual(expected, actual)

  def test_invalid(self):
    # Fails without an order (explicit or scoped) or with one missing axes.
    lt = core.LabeledTensor(tf.reshape(tf.range(2), (1, 2)), ['x', 'y'])
    with self.assertRaises(ValueError):
      core.impose_axis_order(lt)
    with self.assertRaises(ValueError):
      core.impose_axis_order(lt, ['x'])
class FindConsistentOrderingTest(Base):
  """Tests for core._find_consistent_ordering.

  Each case is (left, right, merged-order), where None means no ordering
  consistent with both inputs exists.
  """

  def test(self):
    cases = [
        ([], [], []),
        (['x'], [], ['x']),
        ([], ['x'], ['x']),
        (['x'], ['x'], ['x']),
        (['x'], ['y'], ['x', 'y']),
        (['y'], ['x'], ['y', 'x']),
        (['x', 'y'], ['x', 'y'], ['x', 'y']),
        (['x', 'y'], ['y', 'x'], None),
        (['x', 'y'], ['y', 'z'], ['x', 'y', 'z']),
        (['x', 'z'], ['y', 'z'], ['x', 'y', 'z']),
        (['x', 'y'], ['x', 'z'], ['x', 'y', 'z']),
        (['w', 'x'], ['y', 'z'], ['w', 'x', 'y', 'z']),
        (['x', 'y', 'z'], ['z', 'x'], None),
        (['x', 'y', 'z'], ['x'], ['x', 'y', 'z']),
        ([], ['x', 'y', 'z'], ['x', 'y', 'z']),
    ]
    for left, right, want in cases:
      got = core._find_consistent_ordering(left, right)
      failure_msg = ('unexpected ordering between %r and %r:'
                     '\nexpected: %r\nactual: %r'
                     % (left, right, want, got))
      self.assertEqual(want, got, msg=failure_msg)
class AlignTest(Base):
  """Tests for core.align (broadcasting two labeled tensors together)."""

  def test_name(self):
    align_lt_0, align_lt_1, _ = core.align(self.original_lt, self.original_lt)
    self.assertIn('lt_align', align_lt_0.name)
    self.assertIn('/0', align_lt_0.name)
    self.assertIn('lt_align', align_lt_1.name)
    self.assertIn('/1', align_lt_1.name)

  def test_identical_shaped_inputs(self):
    # Already-aligned inputs pass through unchanged.
    offset_tensor = self.original_lt.tensor + 1
    offset_lt = core.LabeledTensor(offset_tensor, self.original_lt.axes)

    align_lt, align_offset_lt, broadcast_axes = core.align(self.original_lt,
                                                           offset_lt)

    self.assertLabeledTensorsEqual(align_lt, self.original_lt)
    self.assertLabeledTensorsEqual(align_offset_lt, offset_lt)
    self.assertEqual(broadcast_axes, self.original_lt.axes)

  def test_different_inputs(self):
    # Each input gains size-1 axes for those it lacks, in the merged order.
    # The correct axis ordering is ['x', 'channel', 'probs'].
    align_x_probs_lt, align_channel_probs_lt, broadcast_axes = core.align(
        self.x_probs_lt, self.channel_probs_lt)

    x_probs_golden_lt = core.LabeledTensor(
        tf.reshape(self.x_probs_lt.tensor, [self.x_size, 1, self.probs_size]),
        [self.a0, 'channel', self.a3])

    self.assertLabeledTensorsEqual(align_x_probs_lt, x_probs_golden_lt)

    channel_probs_golden_lt = core.LabeledTensor(
        tf.reshape(self.channel_probs_lt.tensor,
                   [1, self.channel_size, self.probs_size]),
        ['x', self.a1, self.a3])

    self.assertLabeledTensorsEqual(align_channel_probs_lt,
                                   channel_probs_golden_lt)

    self.assertEqual(broadcast_axes, core.Axes([self.a0, self.a1, self.a3]))

  def test_axis_order_scope(self):
    # Without a scope, ordering depends on argument order; inside a scope the
    # scope's order wins, and axes missing from the scope raise.
    xz_lt = core.LabeledTensor(tf.ones((2, 3)), ['x', 'z'])
    yz_lt = core.LabeledTensor(tf.ones((4, 3)), ['y', 'z'])

    _, _, broadcast_axes = core.align(xz_lt, yz_lt)
    self.assertEqual(list(broadcast_axes.keys()), ['x', 'y', 'z'])

    _, _, broadcast_axes = core.align(yz_lt, xz_lt)
    self.assertEqual(list(broadcast_axes.keys()), ['y', 'x', 'z'])

    with core.axis_order_scope(['x', 'y', 'z']):
      _, _, broadcast_axes = core.align(yz_lt, xz_lt)
      self.assertEqual(list(broadcast_axes.keys()), ['x', 'y', 'z'])

    with core.axis_order_scope(['x', 'y']):
      with self.assertRaises(core.AxisOrderError):
        core.align(xz_lt, yz_lt)
      with self.assertRaises(core.AxisOrderError):
        core.align(yz_lt, xz_lt)

  def test_invalid_input(self):
    # Same axis name but different labels cannot be aligned.
    lt_0 = core.LabeledTensor(tf.zeros([5]), [('a', range(5))])
    lt_1 = core.LabeledTensor(tf.zeros([5]), [('a', range(1, 6))])
    with self.assertRaises(ValueError):
      core.align(lt_0, lt_1)
class ConvertToLabeledTensorTest(Base):
  """Tests for core.convert_to_labeled_tensor."""
  # TODO(shoyer): Simplify these tests once we can reuse labeled tensors in
  # assertLabeledTensorsEqual.

  def test_labeled_tensor(self):
    # A LabeledTensor passes through unchanged.
    actual = core.convert_to_labeled_tensor(self.original_lt)
    self.assertLabeledTensorsEqual(actual, self.original_lt)

  def test_python_scalar(self):
    actual = core.convert_to_labeled_tensor(42)
    golden_lt = core.LabeledTensor(tf.convert_to_tensor(42), [])
    self.assertLabeledTensorsEqual(actual, golden_lt)

  def test_numpy_array(self):
    # Only 0-d arrays convert; see test_invalid_input for the 1-d case.
    actual = core.convert_to_labeled_tensor(np.array(42))
    golden_lt = core.LabeledTensor(tf.convert_to_tensor(42), [])
    self.assertLabeledTensorsEqual(actual, golden_lt)

  def test_tensor(self):
    actual = core.convert_to_labeled_tensor(tf.constant(42))
    golden_lt = core.LabeledTensor(tf.convert_to_tensor(42), [])
    self.assertLabeledTensorsEqual(actual, golden_lt)

  def test_invalid_input(self):
    # Non-scalar unlabeled inputs are ambiguous and rejected.
    with self.assertRaises(ValueError):
      core.convert_to_labeled_tensor(tf.range(5))
    with self.assertRaises(ValueError):
      core.convert_to_labeled_tensor(np.array([1, 2]))
class DocStringCheckMixin(object):
  """Mixin verifying wrapped ops keep a 'tf.<name>' docstring reference.

  Host classes must define ``self.ops`` as (name, infix, tf_op, lt_op) rows.
  """

  def test_function_docstring_and_name(self):
    for name, _, _, wrapped in self.ops:
      if wrapped is None:
        continue
      self.assertIn('tf.%s' % name, wrapped.__doc__)
      self.assertEqual(name, wrapped.__name__)
class UnaryOpsTestsMixin(object):
  """Mixin checking unary labeled-tensor ops against their tf counterparts."""
  # requires self.ops and self.test_lt to be defined

  def test_core_op(self):
    # Each lt_op must match tf_op applied to the bare tensor, axes preserved.
    for op_name, _, tf_op, lt_op in self.ops:
      if tf_op is not None:
        golden_lt = core.LabeledTensor(tf_op(self.test_lt.tensor),
                                       self.test_lt.axes)
        actual_lt = lt_op(self.test_lt)
        self.assertIn(op_name, actual_lt.name)
        self.assertLabeledTensorsEqual(golden_lt, actual_lt)

  def test_infix(self):
    # Infix operators on the LabeledTensor must match the bare-tensor result.
    for op_name, infix_op, _, _ in self.ops:
      if infix_op is not None:
        expected_lt = core.LabeledTensor(infix_op(self.test_lt.tensor),
                                         self.test_lt.axes)
        actual_lt = infix_op(self.test_lt)
        self.assertIn(op_name, actual_lt.name)
        self.assertLabeledTensorsEqual(expected_lt, actual_lt)
class CoreUnaryOpsTest(Base, DocStringCheckMixin, UnaryOpsTestsMixin):
  """Checks every wrapped elementwise unary op against its tf counterpart."""

  def setUp(self):
    super(CoreUnaryOpsTest, self).setUp()

    # Rows are (name, infix operator, tf op, labeled-tensor op). The original
    # list contained duplicate 'log' and 'lgamma' rows, which only re-ran the
    # same checks; the duplicates are removed here.
    self.ops = [
        ('abs', operator.abs, tf.abs, core.abs_function),
        ('neg', operator.neg, tf.neg, core.neg),
        # TODO(shoyer): add unary + to core TensorFlow
        ('pos', None, None, None),
        ('sign', None, tf.sign, core.sign),
        ('reciprocal', None, tf.reciprocal, core.reciprocal),
        ('square', None, tf.square, core.square),
        ('round', None, tf.round, core.round_function),
        ('sqrt', None, tf.sqrt, core.sqrt),
        ('rsqrt', None, tf.rsqrt, core.rsqrt),
        ('log', None, tf.log, core.log),
        ('exp', None, tf.exp, core.exp),
        ('ceil', None, tf.ceil, core.ceil),
        ('floor', None, tf.floor, core.floor),
        ('cos', None, tf.cos, core.cos),
        ('sin', None, tf.sin, core.sin),
        ('tan', None, tf.tan, core.tan),
        ('acos', None, tf.acos, core.acos),
        ('asin', None, tf.asin, core.asin),
        ('atan', None, tf.atan, core.atan),
        ('lgamma', None, tf.lgamma, core.lgamma),
        ('digamma', None, tf.digamma, core.digamma),
        ('erf', None, tf.erf, core.erf),
        ('erfc', None, tf.erfc, core.erfc),
    ]
    # Scale the fixture into (0, 1] so domain-restricted ops (sqrt, log,
    # asin, ...) receive valid inputs.
    total_size = np.prod([v.size for v in self.original_lt.axes.values()])
    self.test_lt = core.LabeledTensor(
        tf.cast(self.original_lt, tf.float32) / total_size,
        self.original_lt.axes)
class LogicalNotTest(Base, DocStringCheckMixin, UnaryOpsTestsMixin):
  """Checks logical_not on a boolean labeled tensor."""

  def setUp(self):
    super(LogicalNotTest, self).setUp()
    self.ops = [
        ('logical_not', operator.invert, tf.logical_not, core.logical_not),
    ]
    # Comparison yields a boolean labeled tensor to negate.
    self.test_lt = self.original_lt < 10
class BinaryOpsTestsMixin(object):
  """Mixin checking binary labeled-tensor ops against their tf counterparts."""
  # requires self.ops, self.test_lt_1, self.test_lt_2, self.test_lt_1_broadcast
  # and self.test_lt_2_broadcast to be defined

  def test_core_op(self):
    # Each lt_op on the labeled operands must match tf_op applied to the
    # pre-broadcast bare tensors, with the expected broadcast axes.
    for op_name, _, tf_op, lt_op in self.ops:
      golden_tensor = tf_op(self.test_lt_1_broadcast,
                            self.test_lt_2_broadcast)
      golden_lt = core.LabeledTensor(golden_tensor, self.broadcast_axes)
      actual_lt = lt_op(self.test_lt_1, self.test_lt_2)
      self.assertIn(op_name, actual_lt.name)
      self.assertLabeledTensorsEqual(golden_lt, actual_lt)

  def test_infix(self):
    # The infix form must agree with the explicit function form.
    for op_name, infix_op, _, lt_op in self.ops:
      if infix_op is not None:
        expected_lt = lt_op(self.test_lt_1, self.test_lt_2)
        actual_lt = infix_op(self.test_lt_1, self.test_lt_2)
        self.assertIn(op_name, actual_lt.name)
        self.assertLabeledTensorsEqual(expected_lt, actual_lt)
class CoreBinaryOpsTest(Base, DocStringCheckMixin, BinaryOpsTestsMixin):
  """Checks arithmetic/comparison binary ops with broadcasting operands."""

  def setUp(self):
    super(CoreBinaryOpsTest, self).setUp()

    # Manually broadcast the two 2-D fixtures to a common 3-D shape so the
    # mixin can compare against plain tf ops.
    self.x_probs_broadcast_tensor = tf.reshape(
        self.x_probs_lt.tensor, [self.x_size, 1, self.probs_size])

    self.channel_probs_broadcast_tensor = tf.reshape(
        self.channel_probs_lt.tensor, [1, self.channel_size, self.probs_size])

    # == and != are not element-wise for tf.Tensor, so they shouldn't be
    # elementwise for LabeledTensor, either.
    self.ops = [
        ('add', operator.add, tf.add, core.add),
        ('sub', operator.sub, tf.sub, core.sub),
        ('mul', operator.mul, tf.mul, core.mul),
        ('div', operator.truediv, tf.div, core.div),
        ('mod', operator.mod, tf.mod, core.mod),
        ('pow', operator.pow, tf.pow, core.pow_function),
        ('equal', None, tf.equal, core.equal),
        ('less', operator.lt, tf.less, core.less),
        ('less_equal', operator.le, tf.less_equal, core.less_equal),
        ('not_equal', None, tf.not_equal, core.not_equal),
        ('greater', operator.gt, tf.greater, core.greater),
        ('greater_equal', operator.ge, tf.greater_equal, core.greater_equal),
    ]
    self.test_lt_1 = self.x_probs_lt
    self.test_lt_2 = self.channel_probs_lt
    self.test_lt_1_broadcast = self.x_probs_broadcast_tensor
    self.test_lt_2_broadcast = self.channel_probs_broadcast_tensor
    self.broadcast_axes = [self.a0, self.a1, self.a3]

  def test_reflexive(self):
    labeled_tensor = self.x_probs_lt + 1  # all elements must be >0 for division
    for op_name, infix_op, _, lt_op in self.ops:
      if infix_op is not None:
        expected_lt = lt_op(2, labeled_tensor)
        actual_lt = infix_op(2, labeled_tensor)
        # Python uses greater for the reflexive version of less (and vise-versa)
        if 'less' in op_name:
          op_name = op_name.replace('less', 'greater')
        elif 'greater' in op_name:
          op_name = op_name.replace('greater', 'less')
        self.assertIn(op_name, actual_lt.name)
        self.assertLabeledTensorsEqual(expected_lt, actual_lt)
class LogicalBinaryOpsTest(Base, DocStringCheckMixin, BinaryOpsTestsMixin):
  """Checks and/or/xor on boolean labeled tensors (no broadcasting needed)."""

  def setUp(self):
    super(LogicalBinaryOpsTest, self).setUp()

    self.ops = [
        ('logical_and', operator.and_, tf.logical_and, core.logical_and),
        ('logical_or', operator.or_, tf.logical_or, core.logical_or),
        ('logical_xor', operator.xor, tf.logical_xor, core.logical_xor),
    ]
    # Operands share the same axes, so the broadcast result equals either.
    self.test_lt_1 = self.original_lt < 10
    self.test_lt_2 = self.original_lt < 5
    self.test_lt_1_broadcast = self.test_lt_1.tensor
    self.test_lt_2_broadcast = self.test_lt_2.tensor
    self.broadcast_axes = self.test_lt_1.axes
class FloatBinaryOpsTest(Base, DocStringCheckMixin, BinaryOpsTestsMixin):
  """Checks float-only binary ops (special functions, min/max)."""

  def setUp(self):
    super(FloatBinaryOpsTest, self).setUp()

    self.ops = [
        ('igamma', None, tf.igamma, core.igamma),
        ('igammac', None, tf.igammac, core.igammac),
        ('zeta', None, tf.zeta, core.zeta),
        ('polygamma', None, tf.polygamma, core.polygamma),
        ('maximum', None, tf.maximum, core.maximum),
        ('minimum', None, tf.minimum, core.minimum),
        ('squared_difference', None, tf.squared_difference,
         core.squared_difference),
    ]
    # Scale into (0, 1]; the second operand is the complement so both stay
    # within the domain of the special functions above.
    total_size = np.prod([v.size for v in self.original_lt.axes.values()])
    test_lt = core.LabeledTensor(
        tf.cast(self.original_lt, tf.float32) / total_size,
        self.original_lt.axes)
    self.test_lt_1 = test_lt
    self.test_lt_2 = 1.0 - test_lt
    self.test_lt_1_broadcast = self.test_lt_1.tensor
    self.test_lt_2_broadcast = self.test_lt_2.tensor
    self.broadcast_axes = self.test_lt_1.axes
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
gunchleoc/django | tests/shortcuts/tests.py | 132 | 3291 | from django.test import SimpleTestCase, override_settings
from django.test.utils import require_jinja2
@override_settings(
    ROOT_URLCONF='shortcuts.urls',
)
class ShortcutTests(SimpleTestCase):
    """Integration tests for the render()/render_to_response() shortcuts.

    Each test hits a URL from shortcuts.urls and checks the rendered output;
    expected bodies come from the test templates in that app.
    """

    def test_render_to_response(self):
        response = self.client.get('/render_to_response/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'FOO.BAR..\n')
        self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')

    def test_render_to_response_with_multiple_templates(self):
        # A template list renders the first template that exists.
        response = self.client.get('/render_to_response/multiple_templates/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'FOO.BAR..\n')

    def test_render_to_response_with_content_type(self):
        response = self.client.get('/render_to_response/content_type/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'FOO.BAR..\n')
        self.assertEqual(response['Content-Type'], 'application/x-rendertest')

    def test_render_to_response_with_status(self):
        response = self.client.get('/render_to_response/status/')
        self.assertEqual(response.status_code, 403)
        self.assertEqual(response.content, b'FOO.BAR..\n')

    @require_jinja2
    def test_render_to_response_with_using(self):
        # The 'using' argument selects the template engine (DTL vs Jinja2).
        response = self.client.get('/render_to_response/using/')
        self.assertEqual(response.content, b'DTL\n')
        response = self.client.get('/render_to_response/using/?using=django')
        self.assertEqual(response.content, b'DTL\n')
        response = self.client.get('/render_to_response/using/?using=jinja2')
        self.assertEqual(response.content, b'Jinja2\n')

    def test_render(self):
        # render() uses a RequestContext, so the template sees request.path.
        response = self.client.get('/render/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'FOO.BAR../render/\n')
        self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
        self.assertFalse(hasattr(response.context.request, 'current_app'))

    def test_render_with_multiple_templates(self):
        response = self.client.get('/render/multiple_templates/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'FOO.BAR../render/multiple_templates/\n')

    def test_render_with_content_type(self):
        response = self.client.get('/render/content_type/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'FOO.BAR../render/content_type/\n')
        self.assertEqual(response['Content-Type'], 'application/x-rendertest')

    def test_render_with_status(self):
        response = self.client.get('/render/status/')
        self.assertEqual(response.status_code, 403)
        self.assertEqual(response.content, b'FOO.BAR../render/status/\n')

    @require_jinja2
    def test_render_with_using(self):
        response = self.client.get('/render/using/')
        self.assertEqual(response.content, b'DTL\n')
        response = self.client.get('/render/using/?using=django')
        self.assertEqual(response.content, b'DTL\n')
        response = self.client.get('/render/using/?using=jinja2')
        self.assertEqual(response.content, b'Jinja2\n')
| bsd-3-clause |
Kagee/youtube-dl | youtube_dl/extractor/nhl.py | 9 | 6949 | from __future__ import unicode_literals
import re
import json
import os
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
compat_urllib_parse,
compat_urllib_parse_urlparse
)
from ..utils import (
unified_strdate,
)
class NHLBaseInfoExtractor(InfoExtractor):
    """Shared helpers for the nhl.com extractors below."""

    @staticmethod
    def _fix_json(json_string):
        # The NHL API escapes single quotes, which strict JSON parsing rejects.
        return json_string.replace('\\\'', '\'')

    def _real_extract_video(self, video_id):
        """Fetch playlist metadata for one video id and extract its entry."""
        json_url = 'http://video.nhl.com/videocenter/servlets/playlist?ids=%s&format=json' % video_id
        data = self._download_json(
            json_url, video_id, transform_source=self._fix_json)
        return self._extract_video(data[0])

    def _extract_video(self, info):
        """Build a youtube-dl info dict from one playlist JSON entry.

        When info['formats'] == '1', the final URL must be resolved through
        the encryptvideopath servlet using an '_sd' variant of the path;
        otherwise the publishPoint URL is used directly.
        """
        video_id = info['id']
        self.report_extraction(video_id)

        initial_video_url = info['publishPoint']
        if info['formats'] == '1':
            parsed_url = compat_urllib_parse_urlparse(initial_video_url)
            filename, ext = os.path.splitext(parsed_url.path)
            # Request the SD rendition by suffixing the file stem with '_sd'.
            path = '%s_sd%s' % (filename, ext)
            data = compat_urllib_parse.urlencode({
                'type': 'fvod',
                'path': compat_urlparse.urlunparse(parsed_url[:2] + (path,) + parsed_url[3:])
            })
            path_url = 'http://video.nhl.com/videocenter/servlets/encryptvideopath?' + data
            path_doc = self._download_xml(
                path_url, video_id, 'Downloading final video url')
            video_url = path_doc.find('path').text
        else:
            video_url = initial_video_url

        join = compat_urlparse.urljoin
        return {
            'id': video_id,
            'title': info['name'],
            'url': video_url,
            'description': info['description'],
            'duration': int(info['duration']),
            'thumbnail': join(join(video_url, '/u/'), info['bigImage']),
            # releaseDate is split on '.' before parsing -- presumably to drop
            # a fractional/trailing component; TODO confirm API format.
            'upload_date': unified_strdate(info['releaseDate'].split('.')[0]),
        }
class NHLIE(NHLBaseInfoExtractor):
    """Extractor for single nhl.com videocenter videos (main and team sites)."""
    IE_NAME = 'nhl.com'
    _VALID_URL = r'https?://video(?P<team>\.[^.]*)?\.nhl\.com/videocenter/(?:console)?(?:\?(?:.*?[?&])?)id=(?P<id>[-0-9a-zA-Z]+)'
    _TESTS = [{
        'url': 'http://video.canucks.nhl.com/videocenter/console?catid=6?id=453614',
        'md5': 'db704a4ea09e8d3988c85e36cc892d09',
        'info_dict': {
            'id': '453614',
            'ext': 'mp4',
            'title': 'Quick clip: Weise 4-3 goal vs Flames',
            'description': 'Dale Weise scores his first of the season to put the Canucks up 4-3.',
            'duration': 18,
            'upload_date': '20131006',
        },
    }, {
        'url': 'http://video.nhl.com/videocenter/console?id=2014020024-628-h',
        'md5': 'd22e82bc592f52d37d24b03531ee9696',
        'info_dict': {
            'id': '2014020024-628-h',
            'ext': 'mp4',
            'title': 'Alex Galchenyuk Goal on Ray Emery (14:40/3rd)',
            'description': 'Home broadcast - Montreal Canadiens at Philadelphia Flyers - October 11, 2014',
            'duration': 0,
            'upload_date': '20141011',
        },
    }, {
        'url': 'http://video.mapleleafs.nhl.com/videocenter/console?id=58665&catid=802',
        'md5': 'c78fc64ea01777e426cfc202b746c825',
        'info_dict': {
            'id': '58665',
            'ext': 'flv',
            'title': 'Classic Game In Six - April 22, 1979',
            'description': 'It was the last playoff game for the Leafs in the decade, and the last time the Leafs and Habs played in the playoffs. Great game, not a great ending.',
            'duration': 400,
            'upload_date': '20100129'
        },
    }, {
        'url': 'http://video.flames.nhl.com/videocenter/console?id=630616',
        'only_matching': True,
    }, {
        'url': 'http://video.nhl.com/videocenter/?id=736722',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # The id from the URL is all the base class needs.
        video_id = self._match_id(url)
        return self._real_extract_video(video_id)
class NHLNewsIE(NHLBaseInfoExtractor):
    """Extractor for videos embedded in nhl.com news articles."""
    IE_NAME = 'nhl.com:news'
    IE_DESC = 'NHL news'
    _VALID_URL = r'https?://(?:www\.)?nhl\.com/ice/news\.html?(?:\?(?:.*?[?&])?)id=(?P<id>[-0-9a-zA-Z]+)'

    _TEST = {
        'url': 'http://www.nhl.com/ice/news.htm?id=750727',
        'md5': '4b3d1262e177687a3009937bd9ec0be8',
        'info_dict': {
            'id': '736722',
            'ext': 'mp4',
            'title': 'Cal Clutterbuck has been fined $2,000',
            'description': 'md5:45fe547d30edab88b23e0dd0ab1ed9e6',
            'duration': 37,
            'upload_date': '20150128',
        },
    }

    def _real_extract(self, url):
        news_id = self._match_id(url)
        webpage = self._download_webpage(url, news_id)
        # The article id is not the video id; scrape the embedded player's id
        # from one of two known page patterns.
        video_id = self._search_regex(
            [r'pVid(\d+)', r"nlid\s*:\s*'(\d+)'"],
            webpage, 'video id')
        return self._real_extract_video(video_id)
class NHLVideocenterIE(NHLBaseInfoExtractor):
    """Extractor for nhl.com videocenter category pages (playlists)."""
    IE_NAME = 'nhl.com:videocenter'
    IE_DESC = 'NHL videocenter category'
    _VALID_URL = r'https?://video\.(?P<team>[^.]*)\.nhl\.com/videocenter/(console\?[^(id=)]*catid=(?P<catid>[0-9]+)(?![&?]id=).*?)?$'
    _TEST = {
        'url': 'http://video.canucks.nhl.com/videocenter/console?catid=999',
        'info_dict': {
            'id': '999',
            'title': 'Highlights',
        },
        'playlist_count': 12,
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        team = mobj.group('team')
        webpage = self._download_webpage(url, team)
        # The category id may come from a JS variable or an inline JS object.
        cat_id = self._search_regex(
            [r'var defaultCatId = "(.+?)";',
             r'{statusIndex:0,index:0,.*?id:(.*?),'],
            webpage, 'category id')
        playlist_title = self._html_search_regex(
            r'tab0"[^>]*?>(.*?)</td>',
            webpage, 'playlist title', flags=re.DOTALL).lower().capitalize()

        data = compat_urllib_parse.urlencode({
            'cid': cat_id,
            # This is the default value
            'count': 12,
            'ptrs': 3,
            'format': 'json',
        })
        path = '/videocenter/servlets/browse?' + data
        request_url = compat_urlparse.urljoin(url, path)
        response = self._download_webpage(request_url, playlist_title)
        response = self._fix_json(response)
        # Some categories return nothing unless newvideos=true is requested.
        if not response.strip():
            self._downloader.report_warning('Got an empty reponse, trying '
                                            'adding the "newvideos" parameter')
            response = self._download_webpage(request_url + '&newvideos=true',
                                              playlist_title)
            response = self._fix_json(response)
        videos = json.loads(response)

        return {
            '_type': 'playlist',
            'title': playlist_title,
            'id': cat_id,
            'entries': [self._extract_video(v) for v in videos],
        }
| unlicense |
maryklayne/Funcao | examples/intermediate/mplot3d.py | 14 | 1261 | #!/usr/bin/env python
"""Matplotlib 3D plotting example
Demonstrates plotting with matplotlib.
"""
import sys
from sample import sample
from sympy import sin, Symbol
from sympy.external import import_module
def mplot3d(f, var1, var2, show=True):
    """
    Plot a 3d function using matplotlib/Tk.
    """
    import warnings
    # Silence a noisy matplotlib warning about unmatched data.
    warnings.filterwarnings("ignore", "Could not match \S")

    p = import_module('pylab')
    # Try newer version first
    p3 = import_module('mpl_toolkits.mplot3d',
                       __import__kwargs={'fromlist': ['something']}) or import_module('matplotlib.axes3d')
    if not p or not p3:
        sys.exit("Matplotlib is required to use mplot3d.")

    # sample() evaluates f over a grid defined by the (var, min, max, n) specs.
    x, y, z = sample(f, var1, var2)

    fig = p.figure()
    ax = p3.Axes3D(fig)

    # ax.plot_surface(x,y,z) #seems to be a bug in matplotlib
    ax.plot_wireframe(x, y, z)

    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')

    if show:
        p.show()
def main():
    """Plot a saddle surface demo; alternative examples are left commented."""
    x, y = Symbol('x'), Symbol('y')
    mplot3d(x**2 - y**2, (x, -10.0, 10.0, 20), (y, -10.0, 10.0, 20))
    # mplot3d(x**2+y**2, (x, -10.0, 10.0, 20), (y, -10.0, 10.0, 20))
    # mplot3d(sin(x)+sin(y), (x, -3.14, 3.14, 10), (y, -3.14, 3.14, 10))
if __name__ == "__main__":
main()
| bsd-3-clause |
noba3/KoTos | addons/plugin.video.xstream/sites/gstream_in.py | 1 | 15009 | # -*- coding: utf-8 -*-
from resources.lib.util import cUtil
from resources.lib.gui.gui import cGui
from resources.lib.gui.guiElement import cGuiElement
from resources.lib.handler.requestHandler import cRequestHandler
from resources.lib.parser import cParser
from resources.lib.config import cConfig
from resources.lib import logger
from resources.lib.handler.ParameterHandler import ParameterHandler
import hashlib
# Site registration data and endpoint URLs for the gstream.to plugin.
SITE_IDENTIFIER = 'gstream_in'  # must match the module name
SITE_NAME = 'G-Stream'
SITE_ICON = 'gstream.png'
URL_MAIN = 'http://gstream.to'
URL_LOGIN = URL_MAIN + '/login.php'
URL_SHOW_MOVIE = 'http://gstream.to/showthread.php?t='   # + thread id
URL_CATEGORIES = 'http://gstream.to/forumdisplay.php?f='  # + forum id
URL_SEARCH = 'http://gstream.to/search.php'
# Board credentials are read from the addon settings.
oConfig = cConfig()
username = oConfig.getSetting('gstream_in-username')
password = oConfig.getSetting('gstream_in-password')
def load():
    """Build the plugin's main menu: category folders, search and
    (when enabled in the settings) the adult section."""
    oGui = cGui()
    # Solve the CloudFlare challenge (if any) first, then log in, so the
    # category requests below are made with a valid session.
    sSecurityValue = __getSecurityCookieValue()
    __login()
    __createMainMenuEntry(oGui, 'Aktuelle KinoFilme', 542, sSecurityValue)
    oGui.addFolder(cGuiElement('HD Filme',SITE_IDENTIFIER,'showHDMovies'))
    __createMainMenuEntry(oGui, 'Action', 591, sSecurityValue)
    __createMainMenuEntry(oGui, 'Horror', 593, sSecurityValue)
    __createMainMenuEntry(oGui, 'Komoedie', 592, sSecurityValue)
    __createMainMenuEntry(oGui, 'Thriller', 595, sSecurityValue)
    __createMainMenuEntry(oGui, 'Drama', 594, sSecurityValue)
    __createMainMenuEntry(oGui, 'Fantasy', 655, sSecurityValue)
    __createMainMenuEntry(oGui, 'Abenteuer', 596, sSecurityValue)
    __createMainMenuEntry(oGui, 'Animation', 677, sSecurityValue)
    __createMainMenuEntry(oGui, 'Dokumentation', 751, sSecurityValue)
    #__createMainMenuEntry(oGui, 'Serien', 543, sSecurityValue)
    oGuiElement = cGuiElement()
    oGuiElement.setSiteName(SITE_IDENTIFIER)
    oGuiElement.setFunction('displaySearch')
    oGuiElement.setTitle('Suche Filme')
    params = ParameterHandler()
    params.setParam('securityCookie', sSecurityValue)
    # forum id 528 = movie search
    params.setParam('searchType', '528')
    oGui.addFolder(oGuiElement, params)
    # Series parsing is not implemented
    #oGuiElement = cGuiElement()
    #oGuiElement.setSiteName(SITE_IDENTIFIER)
    #oGuiElement.setFunction('displaySearch')
    #oGuiElement.setTitle('Suche Serien')
    #params.setParam('searchType', '532')
    #oGui.addFolder(oGuiElement, params)
    if showAdult():
        oGuiElement = cGuiElement()
        oGuiElement.setSiteName(SITE_IDENTIFIER)
        oGuiElement.setFunction('showXXX')
        oGuiElement.setTitle('XXX')
        oGui.addFolder(oGuiElement, params)
    oGui.setEndOfDirectory()
def __login():
    """Log in to the gstream.to board with the configured credentials.

    The vBulletin form expects the MD5 of the password; the plaintext field
    is submitted empty. NOTE(review): ``hashlib.md5(password)`` takes the
    raw string -- this is Python-2 era code; on Python 3 the password would
    need ``.encode()`` first.
    """
    hPassword = hashlib.md5(password).hexdigest()
    oRequest = cRequestHandler(URL_LOGIN)
    oRequest.addParameters('vb_login_username', username)
    oRequest.addParameters('vb_login_password', '')
    oRequest.addParameters('s', '')
    oRequest.addParameters('do', 'login')
    oRequest.addParameters('vb_login_md5password', hPassword)
    oRequest.addParameters('vb_login_md5password_utf', hPassword)
    oRequest.ignoreDiscard(True)
    oRequest.request()
    # needed to add this, so other sites doesn't delete the cookie in global search
    # alternatively we could call login in showHoster, but this would generate more login requests...
    cookie = oRequest.getCookie("bbsessionhash")
    if cookie:
        # Persist the session cookie beyond this single request.
        cookie.discard = False
        oRequest.setCookie(cookie)
def __createMainMenuEntry(oGui, sMenuName, iCategoryId, sSecurityValue=''):
    """Add one category folder (forum id ``iCategoryId``) to ``oGui``.

    Stores the paging URLs and the security cookie in the parameters so
    ``parseMovieResultSite`` can fetch and page through the listing.
    """
    sCategoryUrl = URL_CATEGORIES + str(iCategoryId) + '&order=desc&page='
    oGuiElement = cGuiElement(sMenuName, SITE_IDENTIFIER, 'parseMovieResultSite')
    params = ParameterHandler()
    params.setParam('normalySiteUrl', sCategoryUrl)
    params.setParam('siteUrl', sCategoryUrl + '1')
    params.setParam('iPage', 1)
    params.setParam('securityCookie', sSecurityValue)
    oGui.addFolder(oGuiElement, params)
def __getSecurityCookieValue():
    """Detect and solve the CloudFlare JavaScript challenge on gstream.to.

    Returns '' when no protection is detected, False when the challenge
    form cannot be parsed, and True after the challenge answer was
    submitted (the resulting cookies live in the request handler).
    NOTE(review): the mixed return types ('' / False / True) are passed
    around by callers as the 'securityCookie' value -- confirm intent.
    """
    oRequest = cRequestHandler(URL_MAIN, False, True)
    oRequest.ignoreDiscard(True)
    sHtmlContent = oRequest.request()
    header = oRequest.getResponseHeader()  # NOTE(review): unused
    sPattern = '>DDoS protection by CloudFlare<'
    oParser = cParser()
    aResult = oParser.parse(sHtmlContent, sPattern)
    if not aResult[0]:
        logger.info('No known protection found')
        return ''
    logger.info('CF DDos protection active')
    # Look for the challenge form
    sPattern = ('a\.value = ([0-9\*\+\-]+);.*?<form id="challenge-form" action="([^"]+)".*?'
                'name="([^"]+)" value="([^"]+)".*?name="([^"]+)"/>.*?</form>')
    oParser = cParser()
    aResult = oParser.parse(sHtmlContent, sPattern)
    if not aResult[0]:
        logger.info('ChallengeForm not found')
        return False
    aResult = aResult[1][0]
    # CloudFlare's expected answer is eval(challenge) + len(hostname).
    constant = len(oRequest.getRealUrl().split('/')[2])
    exp = aResult[0]
    url = aResult[1]
    valueName1 = aResult[2]
    value1 = aResult[3]
    valueName2 = aResult[4]
    # eval() of a site-provided expression; the pattern above restricts it
    # to digits and + - * so arbitrary code cannot be injected.
    value2 = str(eval(exp)+constant)
    url = '%s%s?%s=%s&%s=%s' % (URL_MAIN, url, valueName1, value1, valueName2, value2)
    oRequest = cRequestHandler(url, caching = False, ignoreErrors = True)
    oRequest.addHeaderEntry('Host', 'gstream.to')
    oRequest.addHeaderEntry('Referer', URL_MAIN)
    oRequest.addHeaderEntry('Connection', 'keep-alive')
    oRequest.addHeaderEntry('DNT', '1')
    oRequest.ignoreDiscard(True)
    sHtmlContent = oRequest.request()
    return True
def __getHtmlContent(sUrl=None, sSecurityValue=None):
    """Fetch a page and return its HTML, or False when no URL is known.

    Falls back to the 'siteUrl' parameter when no explicit URL is given.
    The security-cookie value is resolved the same way but currently
    unused (the corresponding header lines are disabled).
    """
    params = ParameterHandler()
    if sUrl is None:
        if not params.exist('siteUrl'):
            logger.info("There is no url we can request.")
            return False
        sUrl = params.getValue('siteUrl')
    if sSecurityValue is None:
        sSecurityValue = params.getValue('securityCookie') if params.exist('securityCookie') else ''
    oRequest = cRequestHandler(sUrl)
    oRequest.ignoreDiscard(True)
    return oRequest.request()
def showXXX():
    """Build the adult-content submenu: full listing, search, and one
    folder per tag prefix of forum 661."""
    params = ParameterHandler()
    oGui = cGui()
    __createMainMenuEntry(oGui, 'Alle Pornos', 661)
    # at the moment no clips can be played because the clip hosters
    # cannot be resolved
    #__createMainMenuEntry(oGui, 'Clips', 669, sSecurityValue)
    oGuiElement = cGuiElement()
    oGuiElement.setSiteName(SITE_IDENTIFIER)
    oGuiElement.setFunction('displaySearch')
    oGuiElement.setTitle('Suche XXX Streams')
    # forum id 530 = adult search
    params.setParam('searchType', '530')
    oGui.addFolder(oGuiElement, params)
    __createMainMenuEntry(oGui, 'Amateure', '661&prefixid=Amateure1')
    __createMainMenuEntry(oGui, 'Anal', '661&prefixid=Anal')
    __createMainMenuEntry(oGui, 'Asia', '661&prefixid=Asia')
    __createMainMenuEntry(oGui, 'Black', '661&prefixid=Ebony')
    __createMainMenuEntry(oGui, 'Blowjob', '661&prefixid=Blowjob')
    __createMainMenuEntry(oGui, 'Deutsch', '661&prefixid=Deutsch')
    __createMainMenuEntry(oGui, 'Fetish', '661&prefixid=Fetish')
    __createMainMenuEntry(oGui, 'Große Brüste', '661&prefixid=GrosseBrueste')
    __createMainMenuEntry(oGui, 'Gruppensex', '661&prefixid=Gruppensex')
    __createMainMenuEntry(oGui, 'Gay', '661&prefixid=Gay')
    __createMainMenuEntry(oGui, 'Hardcore', '661&prefixid=Hardcore')
    __createMainMenuEntry(oGui, 'International', '661&prefixid=International')
    __createMainMenuEntry(oGui, 'Lesben', '661&prefixid=Lesben')
    __createMainMenuEntry(oGui, 'Masturbation', '661&prefixid=Masturbation')
    __createMainMenuEntry(oGui, 'Teens', '661&prefixid=Teens')
    oGui.setEndOfDirectory()
def showHDMovies():
    """List every movie tagged with the 'hd' quality prefix.

    The search endpoint redirects to a result page; one request is made to
    learn that final URL, which is then parsed like any category listing.
    """
    oGui = cGui()
    oRequest = cRequestHandler('http://gstream.to/search.php?do=process&prefixchoice[]=hd',
                               caching=False)
    oRequest.ignoreDiscard(True)
    oRequest.request()
    __parseMovieResultSite(oGui, oRequest.getRealUrl())
    oGui.setEndOfDirectory()
def displaySearch():
    """Ask the user for a search term and show the matching movies."""
    oGui = cGui()
    sSearchText = oGui.showKeyBoard()
    # Keyboard returns False when the dialog was cancelled.
    if sSearchText == False:
        return
    _search(oGui, sSearchText)
    oGui.setEndOfDirectory()
def _search(oGui, sSearchText):
    """Run a forum title search and list the resulting movie threads.

    The forum id to search in is read from the 'searchType' parameter
    (falls back to 528, the movie forum). The search endpoint redirects
    to a result page, which is then parsed like a category listing.
    """
    __login()
    params = ParameterHandler()
    sSearchType = params.getValue('searchType')
    if not sSearchType:
        sSearchType = '528'
    # URL-encode the user input: spaces or special characters would
    # otherwise produce a malformed query URL.
    try:
        from urllib import quote_plus          # Python 2
    except ImportError:
        from urllib.parse import quote_plus    # Python 3
    sUrl = URL_SEARCH+'?do=process&childforums=1&do=process&exactname=1&forumchoice[]='+sSearchType+\
           '&query=' + quote_plus(str(sSearchText)) + '&quicksearch=1&s=&securitytoken=guest&titleonly=1'
    oRequest = cRequestHandler(sUrl, caching = False)
    oRequest.ignoreDiscard(True)
    oRequest.request()
    sUrl = oRequest.getRealUrl()
    __parseMovieResultSite(oGui, sUrl)
def parseMovieResultSite():
    """Folder callback: parse the listing page stored in the parameters."""
    oGui = cGui()
    params = ParameterHandler()
    if params.exist('siteUrl'):
        __parseMovieResultSite(oGui,
                               params.getValue('siteUrl'),
                               params.getValue('normalySiteUrl'),
                               params.getValue('iPage'))
    oGui.setEndOfDirectory()
def __parseMovieResultSite(oGui, siteUrl, normalySiteUrl = '', iPage = 1):
    """Parse one listing page and add a movie entry per thread.

    ``normalySiteUrl`` is the paging base URL (page number appended);
    ``iPage`` is the page currently shown. Adds 'next page' and
    'go to page' entries when the listing spans several pages.
    """
    if not normalySiteUrl:
        normalySiteUrl = siteUrl+'&page='
    params = ParameterHandler()
    # groups: thumbnail url, thread id, attrs (HD marker), title, tail (year)
    sPattern = 'class="p1".*?<img class="large" src="(http://[^"]+)".*?<a href="[^"]+" id=".*?([^"_]+)"(.*?)>([^<]+)</a>(.*?)</tr>'
    #sPattern = 'class="alt1Active".*?<a href="(forumdisplay.php[^"]+)".*?>([^<]+)<.*?(src="([^"]+)|</td>).*?</tr>' #Serien
    # request
    sHtmlContent = __getHtmlContent(sUrl = siteUrl)
    # parse content
    oParser = cParser()
    aResult = oParser.parse(sHtmlContent, sPattern)
    if (aResult[0] == False):
        return
    total = len(aResult[1])
    for img, link, hdS, title, yearS in aResult[1]:
        # NOTE(review): no-op replace -- probably meant "&amp;" -> "&";
        # the source pattern seems to have been mangled. Confirm.
        sMovieTitle = title.replace('&','&')
        sTitle = sMovieTitle
        sUrl = URL_SHOW_MOVIE + str(link)
        year = ''
        # NOTE(review): 'aResult' is reused for sub-parses here, shadowing
        # the listing result; harmless because 'total' was captured above.
        aResult = oParser.parse(yearS, ' ([0-9]{4}) -')
        if aResult[0]:
            year = aResult[1][0]
        aResult = oParser.parse(hdS, '(title="HD Quali")')
        if aResult[0]:
            sTitle = sTitle + ' [HD]'
        oGuiElement = cGuiElement(sTitle,SITE_IDENTIFIER,'getHosters')
        oGuiElement.setMediaType('movie')
        oGuiElement.setYear(year)
        oGuiElement.setThumbnail(img)
        params.setParam('movieUrl', sUrl)
        params.setParam('sMovieTitle', sMovieTitle)
        oGui.addFolder(oGuiElement, params, bIsFolder = False, iTotal = total)
    # check for next site
    iTotalPages = __getTotalPages(iPage, sHtmlContent)
    if (iTotalPages >= int(iPage)+1):
        params = ParameterHandler()
        params.setParam('iPage', int(iPage)+1)
        params.setParam('normalySiteUrl', normalySiteUrl)
        params.setParam('siteUrl', normalySiteUrl+str(int(iPage)+1))
        oGui.addNextPage(SITE_IDENTIFIER,'parseMovieResultSite', params, iTotalPages)
    if iTotalPages > 1:
        oGuiElement = cGuiElement('Go to page x of '+str(iTotalPages),SITE_IDENTIFIER,'gotoPage')
        params = ParameterHandler()
        oGui.addFolder(oGuiElement, params)
    oGui.setView('movies')
def gotoPage():
    """Ask the user for a page number and jump straight to that page."""
    oGui = cGui()
    pageNum = oGui.showNumpad()
    if pageNum:
        params = ParameterHandler()
        sTargetUrl = params.getValue('normalySiteUrl') + pageNum
        __parseMovieResultSite(oGui, sTargetUrl, iPage=int(pageNum))
        oGui.setEndOfDirectory()
def __getTotalPages(iPage, sHtml):
    """Extract the total page count from the 'Seite X von Y' marker.

    Returns 0 when the listing has no pagination marker.
    """
    aResult = cParser().parse(sHtml, '>Seite [0-9]+ von ([0-9]+)<')
    if not aResult[0]:
        return 0
    return int(aResult[1][0])
def __createDisplayStart(iPage):
    """Map a 1-based page number to its 0-based item offset (20 per page)."""
    return 20 * (int(iPage) - 1)
def __createInfo(oGui, sHtmlContent):
    """Parse thumbnail + plot text from a thread page and add a plain
    info entry (opens nothing, shows its description via the Info button)."""
    sPattern = '<td class="alt1" id="td_post_.*?<img src="([^"]+)".*?<b>Inhalt:</b>(.*?)<br />'
    aResult = cParser().parse(sHtmlContent, sPattern)
    if not aResult[0]:
        return
    for sThumb, sRawInfo in aResult[1]:
        sDescription = cUtil().removeHtmlTags(str(sRawInfo)).replace('\t', '').strip()
        oGuiElement = cGuiElement()
        oGuiElement.setSiteName(SITE_IDENTIFIER)
        oGuiElement.setTitle('info (press Info Button)')
        oGuiElement.setThumbnail(str(sThumb))
        oGuiElement.setFunction('dummyFolder')
        oGuiElement.setDescription(sDescription)
        oGui.addFolder(oGuiElement)
def showAdult():
    """Return True when the global 'showAdult' setting is enabled."""
    return cConfig().getSetting('showAdult') == 'true'
def dummyFolder():
    """No-op folder target used by info-only entries."""
    cGui().setEndOfDirectory()
#### Hosterhandling
def getHosters():
    """Collect the hoster links from the movie thread page.

    Reads 'movieUrl' / 'sMovieTitle' from the ParameterHandler and returns
    a list of {'link', 'name'} dicts; the trailing string names the
    callback used to resolve a single entry ('getHosterUrl').
    """
    hosters = []
    params = ParameterHandler()
    if (params.exist('movieUrl') and params.exist('sMovieTitle')):
        sSiteUrl = params.getValue('movieUrl')
        sMovieTitle = params.getValue('sMovieTitle')
        sHtmlContent = __getHtmlContent(sUrl = sSiteUrl)
        sPattern = 'id="ame_noshow_post.*?<a href="([^"]+)" title="[^"]+" target="_blank">([^<]+)</a>'
        aResult = cParser().parse(sHtmlContent, sPattern)
        if aResult[0] == True:
            for aEntry in aResult[1]:
                sUrl = aEntry[0]
                # extract hoster domainname
                # e.g. http://gstream.to/secure/<host>.tld/... -> '<host>'
                if 'gstream.to/secure/' in sUrl :
                    sHoster = sUrl.split('secure/')[-1].split('/')[0].split('.')[-2]
                else:
                    sHoster = sUrl.split('//')[-1].split('/')[0].split('.')[-2]
                hoster = {}
                hoster['link'] = sUrl
                hoster['name'] = sHoster
                hosters.append(hoster)
    hosters.append('getHosterUrl')
    return hosters
def getHosterUrl(sUrl = False):
    """Resolve a single hoster entry to a stream candidate.

    For links through the gstream 'secure' redirector the redirect is
    followed once (with the stored cookie/referer) to reveal the real
    hoster URL; other links are passed through unchanged.
    """
    params = ParameterHandler()
    if not sUrl:
        sUrl = params.getValue('url')
    results = []
    if 'gstream.to/secure/' in sUrl :
        # hostname of the real hoster, embedded in the redirector path
        sHoster = sUrl.split('secure/')[-1].split('/')[0]
        oRequest = cRequestHandler(sUrl, False)
        oRequest.addHeaderEntry('Cookie', params.getValue('securityCookie'))
        oRequest.addHeaderEntry('Referer', params.getValue('movieUrl'))
        oRequest.ignoreDiscard(True)
        try:
            oRequest.request()
            sUrl = oRequest.getRealUrl()
            # strip the redirector prefix: keep everything from the hoster
            # domain onwards
            sUrl = 'http://%s%s' % (sHoster, sUrl.split(sHoster)[-1])
        except:
            # best effort: fall back to the unresolved URL
            pass
    result = {}
    result['streamUrl'] = sUrl
    result['resolved'] = False
    results.append(result)
    return results
shirtsgroup/InterMol | doc/conf.py | 3 | 8795 | # -*- coding: utf-8 -*-
#
# InterMol documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 23 13:01:05 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import intermol.version
# True when the docs are built on readthedocs.org (RTD injects its own theme).
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
    'sphinxcontrib.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'InterMol'
copyright = u'2015, Christoph Klein, Christopher Lee, Ellen Zhong, and Michael Shirts'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = intermol.version.short_version
# The full version, including alpha/beta/rc tags.
release = intermol.version.version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#html_theme = 'default'
if not on_rtd:  # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'InterMoldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'InterMol.tex', u'InterMol Documentation',
     u'Christoph Klein, Christopher Lee, Ellen Zhong, and Michael Shirts', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'intermol', u'InterMol Documentation',
     [u'Christoph Klein, Christopher Lee, Ellen Zhong, and Michael Shirts'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'InterMol', u'InterMol Documentation',
     u'Christoph Klein, Christopher Lee, Ellen Zhong, and Michael Shirts', 'InterMol', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
ForkedReposBak/mxnet | python/mxnet/symbol/numpy/linalg.py | 9 | 35356 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Namespace for operators used in Gluon dispatched by F=symbol."""
import numpy as _np
from . import _symbol
from . import _op as _mx_sym_np # pylint: disable=unused-import
from . import _internal as _npi
__all__ = ['norm', 'svd', 'cholesky', 'qr', 'inv', 'det', 'slogdet', 'solve', 'tensorinv', 'tensorsolve',
'pinv', 'eigvals', 'eig', 'eigvalsh', 'eigh', 'lstsq', 'matrix_rank']
def matrix_rank(M, tol=None, hermitian=False):
    """
    Return matrix rank of array using SVD method.

    The rank is the number of singular values of the array that are
    greater than `tol`.

    Parameters
    ----------
    M : {(M,), (..., M, N)} _Symbol
        Input vector or stack of matrices.
    tol : (...) _Symbol, float, optional
        Threshold below which SVD values are considered zero. When None,
        ``tol`` is chosen by the backend as ``S.max() * max(M.shape) * eps``
        with ``eps`` the epsilon of the singular values' dtype.
    hermitian : bool, optional
        If True, `M` is assumed to be Hermitian (symmetric if real-valued),
        enabling a more efficient method for finding singular values.
        Defaults to False.

    Returns
    -------
    rank : (...) _Symbol
        Rank of M.
    """
    if tol is not None:
        return _npi.matrix_rank(M, tol, hermitian)
    # No explicit tolerance: hand both dtype epsilons to the backend so the
    # default cutoff can be computed for either float32 or float64 inputs.
    eps32 = _np.finfo(_np.float32).eps
    eps64 = _np.finfo(_np.float64).eps
    return _npi.matrix_rank_none_tol(M, eps32, eps64, hermitian)
def lstsq(a, b, rcond='warn'):
    r"""
    Return the least-squares solution to a linear matrix equation.

    Solves :math:`a x = b` by computing the vector `x` that minimizes the
    squared Euclidean 2-norm :math:`\| b - a x \|^2_2`. The system may be
    under-, well-, or over-determined. If `a` is square and of full rank,
    `x` is (up to round-off) the exact solution.

    Parameters
    ----------
    a : (M, N) _Symbol
        "Coefficient" matrix.
    b : {(M,), (M, K)} _Symbol
        Ordinate or "dependent variable" values. With a two-dimensional
        `b`, the solution is computed for each of its `K` columns.
    rcond : float, optional
        Cut-off ratio for small singular values of `a`: singular values
        smaller than ``rcond`` times the largest singular value are
        treated as zero for rank determination. ``'warn'`` or ``-1``
        (default) uses the machine precision; ``None`` uses the machine
        precision times ``max(M, N)``.

    Returns
    -------
    x : {(N,), (N, K)} _Symbol
        Least-squares solution.
    residuals : {(1,), (K,), (0,)} _Symbol
        Sums of squared residuals for each column of ``b - a*x``; empty
        when rank(`a`) < N or M <= N, shape (1,) for one-dimensional `b`.
    rank : int
        Rank of matrix `a`.
    s : (min(M, N),) _Symbol
        Singular values of `a`.

    Raises
    ------
    MXNetError
        If computation does not converge.

    Notes
    -----
    If `b` is a matrix, then all array results are returned as matrices.
    """
    eps32 = _np.finfo(_np.float32).eps
    eps64 = _np.finfo(_np.float64).eps
    # rcond=None selects the "new" default (machine eps * max(M, N)); the
    # placeholder value 1 is ignored by the backend in that case.
    use_new_default = rcond is None
    if use_new_default:
        rcond = 1
    elif rcond == "warn":
        rcond = -1
    x, residuals, rank, s = _npi.lstsq(a, b, rcond=rcond, finfoEps32=eps32,
                                       finfoEps64=eps64, new_default=use_new_default)
    return (x, residuals, rank, s)
def pinv(a, rcond=1e-15, hermitian=False):
    r"""
    Compute the (Moore-Penrose) pseudo-inverse of a matrix.

    Calculates the generalized inverse of a matrix using its singular-value
    decomposition (SVD), including all *large* singular values: with
    :math:`Q_1 \Sigma Q_2^T = A` the SVD of A, :math:`A^+ = Q_2 \Sigma^+ Q_1^T`
    where :math:`\Sigma^+` inverts the non-negligible singular values.

    Parameters
    ----------
    a : (..., M, N) ndarray
        Matrix or stack of matrices to be pseudo-inverted.
    rcond : (...) {float or ndarray of float}, optional
        Cutoff for small singular values. Singular values less than or
        equal to ``rcond * largest_singular_value`` are set to zero.
        Broadcasts against the stack of matrices.
    hermitian : bool, optional
        If True, `a` is assumed to be Hermitian (symmetric if real-valued).
        Not supported yet; must be False.

    Returns
    -------
    B : (..., N, M) ndarray
        The pseudo-inverse of `a`.

    Raises
    ------
    MXNetError
        If the SVD computation does not converge.
    NotImplementedError
        If `hermitian` is True.

    References
    ----------
    .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
           FL, Academic Press, Inc., 1980, pp. 139-142.

    Examples
    --------
    >>> a = np.random.randn(2, 3)
    >>> pinv_a = np.linalg.pinv(a)
    >>> (a - np.dot(a, np.dot(pinv_a, a))).sum()
    array(0.)
    >>> (pinv_a - np.dot(pinv_a, np.dot(a, pinv_a))).sum()
    array(0.)
    """
    if hermitian is True:
        raise NotImplementedError("hermitian is not supported yet...")
    # Scalar and per-matrix (array) cutoffs are handled by different
    # backend kernels.
    if _symbol._np.isscalar(rcond):
        return _npi.pinv_scalar_rcond(a, rcond, hermitian)
    return _npi.pinv(a, rcond, hermitian)
# pylint: disable=too-many-return-statements
def norm(x, ord=None, axis=None, keepdims=False):
    r"""Matrix or vector norm.

    This function is able to return one of eight different matrix norms,
    or one of an infinite number of vector norms (described below), depending
    on the value of the ``ord`` parameter.

    Parameters
    ----------
    x : _Symbol
        Input array. If `axis` is None, `x` must be 1-D or 2-D.
    ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
        Order of the norm (see table under ``Notes``). inf means numpy's
        `inf` object.
    axis : {int, 2-tuple of ints, None}, optional
        If `axis` is an integer, it specifies the axis of `x` along which to
        compute the vector norms. If `axis` is a 2-tuple, it specifies the
        axes that hold 2-D matrices, and the matrix norms of these matrices
        are computed. If `axis` is None then either a vector norm (when `x`
        is 1-D) or a matrix norm (when `x` is 2-D) is returned.
    keepdims : bool, optional
        If this is set to True, the axes which are normed over are left in the
        result as dimensions with size one. With this option the result will
        broadcast correctly against the original `x`.

    Returns
    -------
    n : _Symbol
        Norm of the matrix or vector(s).

    Notes
    -----
    For values of ``ord <= 0``, the result is, strictly speaking, not a
    mathematical 'norm', but it may still be useful for various numerical
    purposes.
    The following norms can be calculated:
    =====  ============================  ==========================
    ord    norm for matrices             norm for vectors
    =====  ============================  ==========================
    None   Frobenius norm                2-norm
    'fro'  Frobenius norm                --
    'nuc'  --                            --
    inf    max(sum(abs(x), axis=1))      max(abs(x))
    -inf   min(sum(abs(x), axis=1))      min(abs(x))
    0      --                            sum(x != 0)
    1      max(sum(abs(x), axis=0))      as below
    -1     min(sum(abs(x), axis=0))      as below
    2      --                            as below
    -2     --                            as below
    other  --                            sum(abs(x)**ord)**(1./ord)
    =====  ============================  ==========================
    The Frobenius norm is given by [1]_:
        :math:`||A||_F = [\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
    The nuclear norm is the sum of the singular values.
    When you want to operate norm for matrices, if you ord is (-1, 1, inf, -inf),
    you must give you axis, it is not support default axis.

    References
    ----------
    .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
           Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15

    Examples
    --------
    >>> from mxnet import np
    >>> a = np.arange(9) - 4
    >>> a
    array([-4., -3., -2., -1.,  0.,  1.,  2.,  3.,  4.])
    >>> b = a.reshape((3, 3))
    >>> b
    array([[-4., -3., -2.],
           [-1.,  0.,  1.],
           [ 2.,  3.,  4.]])
    >>> np.linalg.norm(a)
    array(7.745967)
    >>> np.linalg.norm(b)
    array(7.745967)
    >>> np.linalg.norm(b, 'fro')
    array(7.745967)
    >>> np.linalg.norm(a, 'inf')
    array(4.)
    >>> np.linalg.norm(b, 'inf', axis=(0, 1))
    array(9.)
    >>> np.linalg.norm(a, '-inf')
    array(0.)
    >>> np.linalg.norm(b, '-inf', axis=(0, 1))
    array(2.)
    >>> np.linalg.norm(a, 1)
    array(20.)
    >>> np.linalg.norm(b, 1, axis=(0, 1))
    array(7.)
    >>> np.linalg.norm(a, -1)
    array(0.)
    >>> np.linalg.norm(b, -1, axis=(0, 1))
    array(6.)
    >>> np.linalg.norm(a, 2)
    array(7.745967)
    >>> np.linalg.norm(a, -2)
    array(0.)
    >>> np.linalg.norm(a, 3)
    array(5.8480353)
    >>> np.linalg.norm(a, -3)
    array(0.)

    Using the `axis` argument to compute vector norms:

    >>> c = np.array([[ 1, 2, 3],
    ...               [-1, 1, 4]])
    >>> np.linalg.norm(c, axis=0)
    array([1.4142135, 2.236068 , 5.       ])
    >>> np.linalg.norm(c, axis=1)
    array([3.7416573, 4.2426405])
    >>> np.linalg.norm(c, ord=1, axis=1)
    array([6., 6.])

    Using the `axis` argument to compute matrix norms:

    >>> m = np.arange(8).reshape(2,2,2)
    >>> np.linalg.norm(m, axis=(1,2))
    array([ 3.7416573, 11.224973 ])
    >>> np.linalg.norm(m[0, :, :]), np.linalg.norm(m[1, :, :])
    (array(3.7416573), array(11.224973))
    """
    # Fast path: default Frobenius/2-norm over the whole array
    # (backend flag -2 selects that behaviour).
    if axis is None and ord is None:
        return _npi.norm(x, ord=2, axis=None, keepdims=keepdims, flag=-2)
    if axis is None or isinstance(axis, (int, tuple)):  # pylint: disable=too-many-nested-blocks
        if axis is not None:
            if isinstance(axis, int):
                axis = (axis, )
            if len(axis) == 2:
                # --- Matrix norms over a 2-tuple of axes ---
                if ord in ['inf', '-inf']:
                    row_axis, col_axis = axis
                    if not keepdims:
                        # The column-sum reduction removes col_axis first,
                        # shifting row_axis down when it comes after it.
                        if row_axis > col_axis:
                            row_axis -= 1
                    if ord == 'inf':
                        return _npi.sum(_symbol.abs(x), axis=col_axis, keepdims=keepdims).max(axis=row_axis, keepdims=keepdims) # pylint: disable=line-too-long
                    else:
                        return _npi.sum(_symbol.abs(x), axis=col_axis, keepdims=keepdims).min(axis=row_axis, keepdims=keepdims) # pylint: disable=line-too-long
                if ord in [1, -1]:
                    row_axis, col_axis = axis
                    if not keepdims:
                        # Mirror case: the row-sum reduction removes row_axis
                        # first, shifting col_axis down when it comes after.
                        if row_axis < col_axis:
                            col_axis -= 1
                    if ord == 1:
                        return _npi.sum(_symbol.abs(x), axis=row_axis, keepdims=keepdims).max(axis=col_axis, keepdims=keepdims) # pylint: disable=line-too-long
                    elif ord == -1:
                        return _npi.sum(_symbol.abs(x), axis=row_axis, keepdims=keepdims).min(axis=col_axis, keepdims=keepdims) # pylint: disable=line-too-long
                if ord in [2, -2]:
                    # largest / smallest singular value (backend flag 0)
                    return _npi.norm(x, ord=ord, axis=axis, keepdims=keepdims, flag=0)
        # --- Vector norms (axis is None, an int, or a 1-tuple) ---
        if ord is None:
            return _npi.norm(x, ord=2, axis=axis, keepdims=keepdims, flag=1)
        if ord == 'inf':
            return _npi.max(_symbol.abs(x), axis=axis, keepdims=keepdims)
            #return _npi.norm(x, ord=float('inf'), axis=axis, keepdims=keepdims, flag=3)
        elif ord == '-inf':
            return _npi.min(_symbol.abs(x), axis=axis, keepdims=keepdims)
            #return _npi.norm(x, ord=-float('inf'), axis=axis, keepdims=keepdims, flag=4)
        elif ord is None:
            return _npi.norm(x, ord=2, axis=axis, keepdims=keepdims, flag=1)
        elif ord == 2:
            return _npi.norm(x, ord=2, axis=axis, keepdims=keepdims, flag=-1)
        elif ord == 'nuc':
            # nuclear norm: sum of singular values (backend flag 2)
            return _npi.norm(x, ord=2, axis=axis, keepdims=keepdims, flag=2)
        elif ord in ['fro', 'f']:
            return _npi.norm(x, ord=2, axis=axis, keepdims=keepdims, flag=1)
        else:
            # generic p-norm: sum(abs(x)**ord)**(1/ord)
            return _npi.norm(x, ord=ord, axis=axis, keepdims=keepdims, flag=-1)
    else:
        raise TypeError("'axis' must be None, an integer or a tuple of integers.")
def svd(a):
    r"""
    Singular Value Decomposition.

    When `a` is a 2D array, it is factorized as ``ut @ np.diag(s) @ v``,
    where `ut` and `v` are 2D orthonormal arrays and `s` is a 1D array of
    `a`'s singular values. When `a` is higher-dimensional, SVD is applied in
    stacked mode: it iterates over all indices of the first ``a.ndim - 2``
    dimensions and for each combination SVD is applied to the last two indices.

    Parameters
    ----------
    a : (..., M, N) _Symbol
        A real array with ``a.ndim >= 2`` and ``M <= N``.

    Returns
    -------
    ut : (..., M, M) _Symbol
        Orthonormal array(s). The first ``a.ndim - 2`` dimensions have the
        same size as those of the input `a`.
    s : (..., M) _Symbol
        Vector(s) with the singular values, within each vector sorted in
        descending order. The first ``a.ndim - 2`` dimensions have the same
        size as those of the input `a`.
    v : (..., M, N) _Symbol
        Orthonormal array(s). The first ``a.ndim - 2`` dimensions have the
        same size as those of the input `a`.

    Notes
    -----
    The decomposition is performed using LAPACK routine ``_gesvd``.

    In the 2D case, SVD is written as :math:`A = U^T S V`, where
    :math:`A = a`, :math:`U^T = ut`, :math:`S = \mathtt{np.diag}(s)` and
    :math:`V = v`. The rows of `v` are the eigenvectors of :math:`A^T A` and
    the columns of `ut` are the eigenvectors of :math:`A A^T`. In both cases
    the corresponding (possibly non-zero) eigenvalues are given by ``s**2``.
    The sign of rows of `ut` and `v` are determined as described in
    `Auto-Differentiating Linear Algebra <https://arxiv.org/pdf/1710.08717.pdf>`_.

    The matrix `a` can be reconstructed from the decomposition with either
    ``(ut * s[..., None, :]) @ v`` or ``ut @ (s[..., None] * v)``.
    (The ``@`` operator denotes batch matrix multiplication.)

    This function differs from the original `numpy.linalg.svd
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.svd.html>`_ in
    the following way(s):

    - The sign of rows of `ut` and `v` may differ.
    - Does not support complex input.
    """
    # Delegate to the MXNet backend operator.
    return _npi.svd(a)
def cholesky(a):
    r"""
    Cholesky decomposition.

    Return the Cholesky decomposition, `L * L.T`, of the square matrix `a`,
    where `L` is lower-triangular and ``.T`` is the transpose operator. `a`
    must be symmetric and positive-definite. Only `L` is actually returned.
    Complex-valued input is currently not supported.

    Parameters
    ----------
    a : (..., M, M) ndarray
        Symmetric, positive-definite input matrix.

    Returns
    -------
    L : (..., M, M) ndarray
        Lower-triangular Cholesky factor of `a`.

    Raises
    ------
    MXNetError
        If the decomposition fails, for example, if `a` is not
        positive-definite.

    Notes
    -----
    Broadcasting rules apply.

    The Cholesky decomposition is often used as a fast way of solving

    .. math:: A \mathbf{x} = \mathbf{b}

    (when `A` is both symmetric and positive-definite). First, we solve for
    :math:`\mathbf{y}` in

    .. math:: L \mathbf{y} = \mathbf{b},

    and then for :math:`\mathbf{x}` in

    .. math:: L.T \mathbf{x} = \mathbf{y}.

    Examples
    --------
    >>> A = np.array([[16, 4], [4, 10]])
    >>> A
    array([[16., 4.],
           [ 4., 10.]])
    >>> L = np.linalg.cholesky(A)
    >>> L
    array([[4., 0.],
           [1., 3.]])
    >>> np.dot(L, L.T)
    array([[16., 4.],
           [ 4., 10.]])
    """
    # The second (positional) argument presumably selects the lower-triangular
    # factor, matching the contract above -- TODO confirm against the backend
    # operator's signature.
    return _npi.cholesky(a, True)
def qr(a, mode='reduced'):
    r"""
    Compute the qr factorization of a matrix a.

    Factor the matrix a as qr, where q is orthonormal and r is
    upper-triangular.

    Parameters
    ----------
    a : (..., M, N) _Symbol
        Matrix or stack of matrices to be qr factored.
    mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
        Only the default mode, 'reduced', is implemented. If K = min(M, N),
        then

        * 'reduced' : returns q, r with dimensions (M, K), (K, N) (default)

    Returns
    -------
    q : (..., M, K) _Symbol
        A matrix or stack of matrices with K orthonormal columns, with
        K = min(M, N).
    r : (..., K, N) _Symbol
        A matrix or stack of upper triangular matrices.

    Raises
    ------
    MXNetError
        If factoring fails.

    Examples
    --------
    >>> from mxnet import np
    >>> a = np.random.uniform(-10, 10, (2, 2))
    >>> q, r = np.linalg.qr(a)
    >>> q
    array([[-0.22121978, -0.97522414],
           [-0.97522414,  0.22121954]])
    >>> r
    array([[-4.4131265 , -7.1255064 ],
           [ 0.        , -0.28771925]])
    """
    # mode=None is accepted as an alias for the default 'reduced' mode;
    # anything else is rejected because only 'reduced' is implemented.
    if mode is not None and mode != 'reduced':
        raise NotImplementedError("Only default mode='reduced' is implemented.")
    return _npi.qr(a)
def inv(a):
    r"""
    Compute the (multiplicative) inverse of a matrix.

    Given a square matrix `a`, return the matrix `ainv` satisfying
    ``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.

    Parameters
    ----------
    a : (..., M, M) ndarray
        Matrix to be inverted.

    Returns
    -------
    ainv : (..., M, M) ndarray
        (Multiplicative) inverse of the matrix `a`.

    Raises
    ------
    MXNetError
        If `a` is not square or inversion fails.

    Examples
    --------
    >>> from mxnet import np
    >>> a = np.array([[1., 2.], [3., 4.]])
    >>> np.linalg.inv(a)
    array([[-2. ,  1. ],
           [ 1.5, -0.5]])

    Inverses of several matrices can be computed at once:

    >>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
    >>> np.linalg.inv(a)
    array([[[-2.        ,  1.        ],
            [ 1.5       , -0.5       ]],
           [[-1.2500001 ,  0.75000006],
            [ 0.75000006, -0.25000003]]])
    """
    # Delegate to the MXNet backend operator.
    return _npi.inv(a)
def det(a):
    r"""
    Compute the determinant of an array.

    Parameters
    ----------
    a : (..., M, M) ndarray
        Input array to compute determinants for.

    Returns
    -------
    det : (...) ndarray
        Determinant of `a`.

    See Also
    --------
    slogdet : Another way to represent the determinant, more suitable
        for large matrices where underflow/overflow may occur.

    Notes
    -----
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.

    The determinant is computed via LU factorization using the LAPACK
    routine z/dgetrf.

    Examples
    --------
    The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:

    >>> a = np.array([[1, 2], [3, 4]])
    >>> np.linalg.det(a)
    -2.0

    Computing determinants for a stack of matrices:

    >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
    >>> a.shape
    (3, 2, 2)
    >>> np.linalg.det(a)
    array([-2., -3., -8.])
    """
    # Delegate to the MXNet backend operator.
    return _npi.det(a)
def slogdet(a):
    r"""
    Compute the sign and (natural) logarithm of the determinant of an array.

    If an array has a very small or very large determinant, then a call to
    `det` may overflow or underflow. This routine is more robust against such
    issues, because it computes the logarithm of the determinant rather than
    the determinant itself.

    Parameters
    ----------
    a : (..., M, M) ndarray
        Input array, has to be a square 2-D array.

    Returns
    -------
    sign : (...) ndarray
        A number representing the sign of the determinant. For a real matrix,
        this is 1, 0, or -1.
    logdet : (...) array_like
        The natural log of the absolute value of the determinant.

    If the determinant is zero, then `sign` will be 0 and `logdet` will be
    -Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.

    See Also
    --------
    det

    Notes
    -----
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.

    The determinant is computed via LU factorization using the LAPACK
    routine z/dgetrf.

    Examples
    --------
    The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:

    >>> a = np.array([[1, 2], [3, 4]])
    >>> (sign, logdet) = np.linalg.slogdet(a)
    >>> (sign, logdet)
    (-1., 0.69314718055994529)
    >>> sign * np.exp(logdet)
    -2.0

    Computing log-determinants for a stack of matrices:

    >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
    >>> sign, logdet = np.linalg.slogdet(a)
    >>> (sign, logdet)
    (array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
    >>> sign * np.exp(logdet)
    array([-2., -3., -8.])

    This routine succeeds where ordinary `det` does not:

    >>> np.linalg.det(np.eye(500) * 0.1)
    0.0
    >>> np.linalg.slogdet(np.eye(500) * 0.1)
    (1., -1151.2925464970228)
    """
    # Delegate to the MXNet backend operator.
    return _npi.slogdet(a)
def solve(a, b):
    r"""
    Solve a linear matrix equation, or system of linear scalar equations.

    Computes the "exact" solution, `x`, of the well-determined, i.e., full
    rank, linear matrix equation `ax = b`.

    Parameters
    ----------
    a : (..., M, M) ndarray
        Coefficient matrix.
    b : {(..., M,), (..., M, K)}, ndarray
        Ordinate or "dependent variable" values.

    Returns
    -------
    x : {(..., M,), (..., M, K)} ndarray
        Solution to the system a x = b. Returned shape is identical to `b`.

    Raises
    ------
    MXNetError
        If `a` is singular or not square.

    Notes
    -----
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.

    The solutions are computed using LAPACK routine ``_gesv``.

    `a` must be square and of full-rank, i.e., all rows (or, equivalently,
    columns) must be linearly independent; if either is not true, use
    `lstsq` for the least-squares best "solution" of the system/equation.

    Examples
    --------
    Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:

    >>> a = np.array([[3,1], [1,2]])
    >>> b = np.array([9,8])
    >>> x = np.linalg.solve(a, b)
    >>> x
    array([2., 3.])

    Check that the solution is correct:

    >>> np.allclose(np.dot(a, x), b)
    True
    """
    # Delegate to the MXNet backend operator.
    return _npi.solve(a, b)
def tensorinv(a, ind=2):
    r"""
    Compute the 'inverse' of an N-dimensional array.

    The result is an inverse for `a` relative to the tensordot operation
    ``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
    ``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
    tensordot operation.

    Parameters
    ----------
    a : array_like
        Tensor to 'invert'. Its shape must be 'square', i. e.,
        ``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
    ind : int, optional
        Number of first indices that are involved in the inverse sum.
        Must be a positive integer, default is 2.

    Returns
    -------
    b : ndarray
        `a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.

    Raises
    ------
    MXNetError
        If `a` is singular or not 'square' (in the above sense).

    See Also
    --------
    tensordot, tensorsolve

    Examples
    --------
    >>> a = np.eye(4*6)
    >>> a.shape = (4, 6, 8, 3)
    >>> ainv = np.linalg.tensorinv(a, ind=2)
    >>> ainv.shape
    (8, 3, 4, 6)
    >>> b = np.random.randn(4, 6)
    >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
    True

    >>> a = np.eye(4*6)
    >>> a.shape = (24, 8, 3)
    >>> ainv = np.linalg.tensorinv(a, ind=1)
    >>> ainv.shape
    (8, 3, 24)
    >>> b = np.random.randn(24)
    >>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
    True
    """
    # Delegate to the MXNet backend operator.
    return _npi.tensorinv(a, ind)
def tensorsolve(a, b, axes=None):
    r"""
    Solve the tensor equation ``a x = b`` for x.

    It is assumed that all indices of `x` are summed over in the product,
    together with the rightmost indices of `a`, as is done in, for example,
    ``tensordot(a, x, axes=b.ndim)``.

    Parameters
    ----------
    a : ndarray
        Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
        the shape of that sub-tensor of `a` consisting of the appropriate
        number of its rightmost indices, and must be such that
        ``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
        'square').
    b : ndarray
        Right-hand tensor, which can be of any shape.
    axes : tuple of ints, optional
        Axes in `a` to reorder to the right, before inversion.
        If None (default), no reordering is done.

    Returns
    -------
    x : ndarray, shape Q

    Raises
    ------
    MXNetError
        If `a` is singular or not 'square' (in the above sense).

    See Also
    --------
    numpy.tensordot, tensorinv, numpy.einsum

    Examples
    --------
    >>> a = np.eye(2*3*4)
    >>> a.shape = (2*3, 4, 2, 3, 4)
    >>> b = np.random.randn(2*3, 4)
    >>> x = np.linalg.tensorsolve(a, b)
    >>> x.shape
    (2, 3, 4)
    >>> np.allclose(np.tensordot(a, x, axes=3), b)
    True
    """
    # Delegate to the MXNet backend operator.
    return _npi.tensorsolve(a, b, axes)
def eigvals(a):
    r"""
    Compute the eigenvalues of a general matrix.

    Main difference between `eigvals` and `eig`: the eigenvectors aren't
    returned.

    Parameters
    ----------
    a : (..., M, M) ndarray
        A real-valued matrix whose eigenvalues will be computed.

    Returns
    -------
    w : (..., M,) ndarray
        The eigenvalues, each repeated according to its multiplicity.
        They are not necessarily ordered.

    Raises
    ------
    MXNetError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eig : eigenvalues and right eigenvectors of general arrays
    eigh : eigenvalues and eigenvectors of a real symmetric array.
    eigvalsh : eigenvalues of a real symmetric matrix.

    Notes
    -----
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.

    This is implemented using the ``_geev`` LAPACK routines which compute
    the eigenvalues and eigenvectors of general square arrays.

    This function differs from the original `numpy.linalg.eigvals
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.eigvals.html>`_ in
    the following way(s):

    - Does not support complex input and output.
    """
    # Delegate to the MXNet backend operator.
    return _npi.eigvals(a)
def eigvalsh(a, UPLO='L'):
    r"""
    Compute the eigenvalues of a real symmetric matrix.

    Main difference from eigh: the eigenvectors are not computed.

    Parameters
    ----------
    a : (..., M, M) ndarray
        A real-valued matrix whose eigenvalues are to be computed.
    UPLO : {'L', 'U'}, optional
        Specifies whether the calculation is done with the lower triangular
        part of `a` ('L', default) or the upper triangular part ('U').
        Irrespective of this value only the real parts of the diagonal will
        be considered in the computation to preserve the notion of a Hermitian
        matrix. It therefore follows that the imaginary part of the diagonal
        will always be treated as zero.

    Returns
    -------
    w : (..., M,) ndarray
        The eigenvalues in ascending order, each repeated according to
        its multiplicity.

    Raises
    ------
    MXNetError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eig : eigenvalues and right eigenvectors of general arrays
    eigvals : eigenvalues of a non-symmetric array.
    eigh : eigenvalues and eigenvectors of a real symmetric array.

    Notes
    -----
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.

    The eigenvalues are computed using LAPACK routines ``_syevd``.

    This function differs from the original `numpy.linalg.eigvalsh
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.eigvalsh.html>`_ in
    the following way(s):

    - Does not support complex input and output.
    """
    # Delegate to the MXNet backend operator.
    return _npi.eigvalsh(a, UPLO)
def eig(a):
    r"""
    Compute the eigenvalues and right eigenvectors of a square array.

    Parameters
    ----------
    a : (..., M, M) ndarray
        Matrices for which the eigenvalues and right eigenvectors will
        be computed.

    Returns
    -------
    w : (..., M) ndarray
        The eigenvalues, each repeated according to its multiplicity.
        The eigenvalues are not necessarily ordered.
    v : (..., M, M) ndarray
        The normalized (unit "length") eigenvectors, such that the
        column ``v[:,i]`` is the eigenvector corresponding to the
        eigenvalue ``w[i]``.

    Raises
    ------
    MXNetError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eigvals : eigenvalues of a non-symmetric array.
    eigh : eigenvalues and eigenvectors of a real symmetric array.
    eigvalsh : eigenvalues of a real symmetric matrix.

    Notes
    -----
    This is implemented using the ``_geev`` LAPACK routines which compute
    the eigenvalues and eigenvectors of general square arrays.

    The number `w` is an eigenvalue of `a` if there exists a vector
    `v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
    `v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
    for :math:`i \\in \\{0,...,M-1\\}`.

    The array `v` of eigenvectors may not be of maximum rank, that is, some
    of the columns may be linearly dependent, although round-off error may
    obscure that fact. If the eigenvalues are all different, then theoretically
    the eigenvectors are linearly independent.

    This function differs from the original `numpy.linalg.eig
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.eig.html>`_ in
    the following way(s):

    - Does not support complex input and output.
    """
    # Delegate to the MXNet backend operator.
    return _npi.eig(a)
def eigh(a, UPLO='L'):
    r"""
    Return the eigenvalues and eigenvectors of a real symmetric matrix.

    Returns two objects, a 1-D array containing the eigenvalues of `a`, and
    a 2-D square array or matrix (depending on the input type) of the
    corresponding eigenvectors (in columns).

    Parameters
    ----------
    a : (..., M, M) ndarray
        Real symmetric matrices whose eigenvalues and eigenvectors are to be
        computed.
    UPLO : {'L', 'U'}, optional
        Specifies whether the calculation is done with the lower triangular
        part of `a` ('L', default) or the upper triangular part ('U').
        Irrespective of this value only the real parts of the diagonal will
        be considered in the computation to preserve the notion of a Hermitian
        matrix. It therefore follows that the imaginary part of the diagonal
        will always be treated as zero.

    Returns
    -------
    w : (..., M) ndarray
        The eigenvalues in ascending order, each repeated according to
        its multiplicity.
    v : {(..., M, M) ndarray, (..., M, M) matrix}
        The column ``v[:, i]`` is the normalized eigenvector corresponding
        to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
        a matrix object.

    Raises
    ------
    MXNetError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eig : eigenvalues and right eigenvectors of general arrays
    eigvals : eigenvalues of a non-symmetric array.
    eigvalsh : eigenvalues of a real symmetric matrix.

    Notes
    -----
    The eigenvalues/eigenvectors are computed using LAPACK routines
    ``_syevd``.

    This function differs from the original `numpy.linalg.eigh
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.eigh.html>`_ in
    the following way(s):

    - Does not support complex input and output.
    """
    # Delegate to the MXNet backend operator.
    return _npi.eigh(a, UPLO)
| apache-2.0 |
peguin40/zulip | zerver/templatetags/app_filters.py | 7 | 2706 | from django.conf import settings
from django.template import Library
from django.utils.safestring import mark_safe
from django.utils.lru_cache import lru_cache
from zerver.lib.utils import force_text
import zerver.lib.bugdown.fenced_code
import markdown
import markdown.extensions.codehilite
import markdown.extensions.toc
# Filter registry for this module; Django's template system requires a
# module-level name 'register' in every templatetags module.
register = Library()
def and_n_others(values, limit):
    # type: (List[str], int) -> str
    """Return an ``" and N other(s)"`` suffix for a truncated list.

    N is the number of entries in `values` beyond `limit`; the word
    "other" is pluralized unless exactly one entry was omitted.
    """
    remaining = len(values) - limit
    plural_suffix = "" if remaining == 1 else "s"
    return " and %d other%s" % (remaining, plural_suffix)
@register.filter(name='display_list', is_safe=True)
def display_list(values, display_limit):
    # type: (List[str], int) -> str
    """
    Format a list of values as a readable English phrase, summarizing
    with "and N other(s)" once the list exceeds `display_limit`. For a
    `display_limit` of 3 the possible shapes are:

        Jessica
        Jessica and Waseem
        Jessica, Waseem, and Tim
        Jessica, Waseem, Tim, and 1 other
        Jessica, Waseem, Tim, and 2 others
    """
    count = len(values)
    if count == 1:
        # Single value: no separators needed.
        return "%s" % (values[0],)
    if count <= display_limit:
        # Everything fits: comma-join all but the last, then "and <last>".
        listed = ", ".join("%s" % (value,) for value in values[:-1])
        return listed + " and %s" % (values[-1],)
    # Too many values: show the first `display_limit` and summarize the rest.
    listed = ", ".join("%s" % (value,) for value in values[:display_limit])
    return listed + and_n_others(values, display_limit)
# Lazily-initialized, module-wide Markdown engine (constructing one is
# expensive, so it is built once on first use and then reused).
md_engine = None

@lru_cache(512 if settings.PRODUCTION else 0)
@register.filter(name='render_markdown_path', is_safe=True)
def render_markdown_path(markdown_file_path):
    # type: (str) -> str
    """Given a path to a markdown file, return the rendered html.

    Note that this assumes that any HTML in the markdown file is
    trusted; it is intended to be used for documentation, not user
    data."""
    global md_engine
    if md_engine is None:
        md_engine = markdown.Markdown(extensions=[
            markdown.extensions.toc.makeExtension(),
            markdown.extensions.codehilite.makeExtension(
                linenums=False,
                guess_lang=False
            ),
            zerver.lib.bugdown.fenced_code.makeExtension(),
        ])
    # reset() clears per-document state (e.g. TOC data) from earlier renders.
    md_engine.reset()
    # Use a context manager so the file handle is closed promptly.
    with open(markdown_file_path) as f:
        markdown_string = force_text(f.read())
    # Bug fix: previously the output of md_engine.convert() was fed through
    # markdown.markdown() a second time, re-rendering the already-produced
    # HTML with a bare engine (no extensions) and potentially mangling it.
    # Render exactly once with the configured engine.
    html = md_engine.convert(markdown_string)
    return mark_safe(html)
| apache-2.0 |
basho-labs/riak-cxx-client | deps/boost-1.47.0/tools/build/v2/tools/common.py | 10 | 31622 | # Status: being ported by Steven Watanabe
# Base revision: 47174
#
# Copyright (C) Vladimir Prus 2002. Permission to copy, use, modify, sell and
# distribute this software is granted provided this copyright notice appears in
# all copies. This software is provided "as is" without express or implied
# warranty, and with no claim as to its suitability for any purpose.
""" Provides actions common to all toolsets, such as creating directories and
removing files.
"""
import re
import bjam
import os
import os.path
import sys
from b2.build import feature
from b2.util.utility import *
from b2.util import path
# Captures everything up to (but not including) the first dash; used to
# extract the base toolset name from versioned names such as 'intel-linux'.
__re__before_first_dash = re.compile ('([^-]*)-')
def reset ():
    """ Clear the module state. This is mainly for testing purposes.
        Note that this must be called _after_ resetting the module 'feature'.
    """
    global __had_unspecified_value, __had_value, __declared_subfeature
    global __init_loc
    global __all_signatures, __debug_configuration, __show_configuration

    # Stores toolsets without specified initialization values.
    __had_unspecified_value = {}

    # Stores toolsets with specified initialization values.
    __had_value = {}

    # Stores toolsets with declared subfeatures.
    __declared_subfeature = {}

    # Stores all signatures of the toolsets.
    __all_signatures = {}

    # Stores the initialization locations of each toolset.
    __init_loc = {}

    # Diagnostic flags taken from the build-system command line.
    __debug_configuration = '--debug-configuration' in bjam.variable('ARGV')
    __show_configuration = '--show-configuration' in bjam.variable('ARGV')

    global __executable_path_variable
    OS = bjam.call("peek", [], "OS")[0]
    if OS == "NT":
        # On Windows the case and capitalization of PATH is not always predictable, so
        # let's find out what variable name was really set.
        for n in sys.environ:
            if n.lower() == "path":
                __executable_path_variable = n
                break
    else:
        __executable_path_variable = "PATH"

    # Per-OS environment variable used to locate shared libraries at run time.
    # On NT, DLLs are found via the executable search path, so the detected
    # PATH variable name is reused.
    m = {"NT": __executable_path_variable,
         "CYGWIN": "PATH",
         "MACOSX": "DYLD_LIBRARY_PATH",
         "AIX": "LIBPATH"}
    global __shared_library_path_variable
    __shared_library_path_variable = m.get(OS, "LD_LIBRARY_PATH")

# Initialize module state on import.
reset()
def shared_library_path_variable():
    """Return the name of the environment variable used to locate shared
    libraries on the current OS (computed by reset()), e.g. 'LD_LIBRARY_PATH'."""
    return __shared_library_path_variable
# ported from trunk@47174
class Configurations(object):
    """
    This class helps to manage toolset configurations. Each configuration
    has a unique ID and one or more parameters. A typical example of a unique ID
    is a condition generated by 'common.check-init-parameters' rule. Other kinds
    of IDs can be used. Parameters may include any details about the configuration
    like 'command', 'path', etc.

    A toolset configuration may be in one of the following states:

        - registered
              Configuration has been registered (e.g. by autodetection code) but has
              not yet been marked as used, i.e. 'toolset.using' rule has not yet been
              called for it.
        - used
              Once called 'toolset.using' rule marks the configuration as 'used'.

    The main difference between the states above is that while a configuration is
    'registered' its options can be freely changed. This is useful in particular
    for autodetection code - all detected configurations may be safely overwritten
    by user code.
    """

    def __init__(self):
        # Sets of configuration ids; 'used_' is always a subset of 'all_'.
        self.used_ = set()
        self.all_ = set()
        # Maps parameter name -> {configuration id -> value}.
        self.params = {}

    def register(self, id):
        """
        Registers a configuration.

        Returns True if the configuration has been added and False if
        it already exists. Reports an error if the configuration is 'used'.
        """
        if id in self.used_:
            # FIXME: 'errors' module is not imported in this port.
            errors.error("common: the configuration '%s' is in use" % id)

        if id not in self.all_:
            # Bug fix: was 'self.all_ += [id]', which raises TypeError on a set.
            self.all_.add(id)

            # Indicate that a new configuration has been added.
            return True
        else:
            return False

    def use(self, id):
        """
        Mark a configuration as 'used'.

        Returns True if the state of the configuration has been changed to
        'used' and False if it the state wasn't changed. Reports an error
        if the configuration isn't known.
        """
        if id not in self.all_:
            # FIXME: 'errors' module is not imported in this port.
            errors.error("common: the configuration '%s' is not known" % id)

        if id not in self.used_:
            # Bug fix: was 'self.used_ += [id]', which raises TypeError on a set.
            self.used_.add(id)

            # Indicate that the configuration has been marked as 'used'.
            return True
        else:
            return False

    def all(self):
        """ Return all registered configurations. """
        return self.all_

    def used(self):
        """ Return all used configurations. """
        return self.used_

    def get(self, id, param):
        """ Returns the value of a configuration parameter, or None if unset. """
        # Bug fix: the original referenced the nonexistent attribute
        # 'self.params_', used the nonexistent dict method 'getdefault',
        # and had no 'return' statement.
        return self.params.get(param, {}).get(id, None)

    def set(self, id, param, value):
        """ Sets the value of a configuration parameter. """
        # Bug fix: same nonexistent 'self.params_' / 'getdefault' issues.
        self.params.setdefault(param, {})[id] = value
# Ported from trunk@47174
def check_init_parameters(toolset, requirement, *args):
    """ The rule for checking toolset parameters. Trailing parameters should all be
    parameter name/value pairs. The rule will check that each parameter either has
    a value in each invocation or has no value in each invocation. Also, the rule
    will check that the combination of all parameter values is unique in all
    invocations.

    Each parameter name corresponds to a subfeature. This rule will declare a
    subfeature the first time a non-empty parameter value is passed and will
    extend it with all the values.

    The return value from this rule is a condition to be used for flags settings.
    """
    # The type checking here is my best guess about
    # what the types should be.
    assert(isinstance(toolset, str))
    assert(isinstance(requirement, str) or requirement is None)
    # 'sig' accumulates a signature string of all parameter values, used to
    # detect duplicate initializations of the same toolset configuration.
    sig = toolset
    condition = replace_grist(toolset, '<toolset>')
    subcondition = []

    for arg in args:
        assert(isinstance(arg, tuple))
        assert(len(arg) == 2)
        name = arg[0]
        value = arg[1]
        assert(isinstance(name, str))
        assert(isinstance(value, str) or value is None)

        # Key into the module-level consistency-tracking dicts.
        str_toolset_name = str((toolset, name))

        # FIXME: is this the correct translation?
        ### if $(value)-is-not-empty
        if value is not None:
            condition = condition + '-' + value
            # A value is given now but an earlier initialization omitted one:
            # that is an inconsistency.
            if __had_unspecified_value.has_key(str_toolset_name):
                raise BaseException("'%s' initialization: parameter '%s' inconsistent\n" \
                "no value was specified in earlier initialization\n" \
                "an explicit value is specified now" % (toolset, name))

            # The logic below is for intel compiler. It calls this rule
            # with 'intel-linux' and 'intel-win' as toolset, so we need to
            # get the base part of toolset name.
            # We can't pass 'intel' as toolset, because it that case it will
            # be impossible to register versionles intel-linux and
            # intel-win of specific version.
            t = toolset
            m = __re__before_first_dash.match(toolset)
            if m:
                t = m.group(1)

            if not __had_value.has_key(str_toolset_name):
                if not __declared_subfeature.has_key(str((t, name))):
                    # First time this parameter is seen with a value: declare
                    # the corresponding propagated subfeature.
                    feature.subfeature('toolset', t, name, [], ['propagated'])
                    __declared_subfeature[str((t, name))] = True

                __had_value[str_toolset_name] = True

            feature.extend_subfeature('toolset', t, name, [value])
            subcondition += ['<toolset-' + t + ':' + name + '>' + value ]

        else:
            # No value now but an earlier initialization gave one: inconsistent.
            if __had_value.has_key(str_toolset_name):
                raise BaseException ("'%s' initialization: parameter '%s' inconsistent\n" \
                "an explicit value was specified in an earlier initialization\n" \
                "no value is specified now" % (toolset, name))

            __had_unspecified_value[str_toolset_name] = True

        if value == None: value = ''

        sig = sig + value + '-'

    # Reject an exact duplicate of a previously-seen parameter combination.
    if __all_signatures.has_key(sig):
        message = "duplicate initialization of '%s' with the following parameters: " % toolset
        for arg in args:
            name = arg[0]
            value = arg[1]
            if value == None: value = '<unspecified>'

            message += "'%s' = '%s'\n" % (name, value)

        raise BaseException(message)

    __all_signatures[sig] = True
    # FIXME
    __init_loc[sig] = "User location unknown" #[ errors.nearest-user-location ] ;

    # If we have a requirement, this version should only be applied under that
    # condition. To accomplish this we add a toolset requirement that imposes
    # the toolset subcondition, which encodes the version.
    if requirement:
        r = ['<toolset>' + toolset, requirement]
        r = ','.join(r)
        # NOTE(review): 'toolset' is a str here, so this calls
        # str.add_requirements, which does not exist -- this looks like an
        # incomplete port that should call the b2.build.toolset module's
        # add_requirements; confirm against upstream before relying on the
        # 'requirement' code path.
        toolset.add_requirements([r + ':' + c for c in subcondition])

    # We add the requirements, if any, to the condition to scope the toolset
    # variables and options to this specific version.
    condition = [condition]
    if requirement:
        condition += [requirement]

    if __show_configuration:
        print "notice:", condition
    return ['/'.join(condition)]
# Ported from trunk@47077
def get_invocation_command_nodefault(
toolset, tool, user_provided_command=[], additional_paths=[], path_last=False):
"""
A helper rule to get the command to invoke some tool. If
'user-provided-command' is not given, tries to find binary named 'tool' in
PATH and in the passed 'additional-path'. Otherwise, verifies that the first
element of 'user-provided-command' is an existing program.
This rule returns the command to be used when invoking the tool. If we can't
find the tool, a warning is issued. If 'path-last' is specified, PATH is
checked after 'additional-paths' when searching for 'tool'.
"""
assert(isinstance(toolset, str))
assert(isinstance(tool, str))
assert(isinstance(user_provided_command, list))
if additional_paths is not None:
assert(isinstance(additional_paths, list))
assert(all([isinstance(path, str) for path in additional_paths]))
assert(all(isinstance(path, str) for path in additional_paths))
assert(isinstance(path_last, bool))
if not user_provided_command:
command = find_tool(tool, additional_paths, path_last)
if not command and __debug_configuration:
print "warning: toolset", toolset, "initialization: can't find tool, tool"
#FIXME
#print "warning: initialized from" [ errors.nearest-user-location ] ;
else:
command = check_tool(user_provided_command)
if not command and __debug_configuration:
print "warning: toolset", toolset, "initialization:"
print "warning: can't find user-provided command", user_provided_command
#FIXME
#ECHO "warning: initialized from" [ errors.nearest-user-location ]
assert(isinstance(command, str))
return command
# ported from trunk@47174
def get_invocation_command(toolset, tool, user_provided_command = [],
                           additional_paths = [], path_last = False):
    """ Same as get_invocation_command_nodefault, except that if no tool is found,
        returns either the user-provided-command, if present, or the 'tool' parameter.
    """
    assert(isinstance(toolset, str))
    assert(isinstance(tool, str))
    assert(isinstance(user_provided_command, list))
    if additional_paths is not None:
        assert(isinstance(additional_paths, list))
        assert(all([isinstance(path, str) for path in additional_paths]))
    assert(isinstance(path_last, bool))

    # Try the full lookup first; fall back to whatever the caller gave us,
    # and finally to the bare tool name.
    command = get_invocation_command_nodefault(
        toolset, tool, user_provided_command, additional_paths, path_last)

    if command:
        result = command
    elif user_provided_command:
        result = user_provided_command[0]
    else:
        result = tool

    assert(isinstance(result, str))
    return result
# ported from trunk@47281
def get_absolute_tool_path(command):
"""
Given an invocation command,
return the absolute path to the command. This works even if commnad
has not path element and is present in PATH.
"""
if os.path.dirname(command):
return os.path.dirname(command)
else:
programs = path.programs_path()
m = path.glob(programs, [command, command + '.exe' ])
if not len(m):
print "Could not find:", command, "in", programs
return os.path.dirname(m[0])
# ported from trunk@47174
def find_tool(name, additional_paths = [], path_last = False):
    """ Attempts to find tool (binary) named 'name' in PATH and in
    'additional-paths'. If found in path, returns 'name'. If
    found in additional paths, returns full name. If the tool
    is found in several directories, returns the first path found.
    Otherwise, returns the empty string. If 'path_last' is specified,
    path is checked after 'additional_paths'.
    """
    assert(isinstance(name, str))
    assert(isinstance(additional_paths, list))
    assert(isinstance(path_last, bool))
    # Try both the bare name and the Windows-style executable name.
    candidates = [name, name + '.exe']
    in_path = path.glob(path.programs_path(), candidates)
    in_additional = path.glob(additional_paths, candidates)
    # 'or' on the two (possibly empty) match lists implements the
    # first-hit-wins ordering in both modes.
    if path_last:
        found = in_additional or in_path
    else:
        found = in_path or in_additional
    if found:
        return path.native(found[0])
    return ''
#ported from trunk@47281
def check_tool_aux(command):
    """ Checks if 'command' can be found either in path
    or is a full name to an existing file.

    Returns 'command' on success, None otherwise.
    """
    assert(isinstance(command, str))
    if os.path.dirname(command):
        # Qualified name: accept it if the file exists, possibly with a
        # platform-specific executable extension.
        if os.path.exists(command):
            return command
        # Both NT and Cygwin will run .exe files by their unqualified names.
        if on_windows() and os.path.exists(command + '.exe'):
            return command
        # Only NT will run .bat files by their unqualified names.
        if os_name() == 'NT' and os.path.exists(command + '.bat'):
            return command
    elif path.glob(path.programs_path(), [command]):
        # Unqualified name: accept it if it is somewhere on the program path.
        return command
# ported from trunk@47281
def check_tool(command):
    """ Checks that a tool can be invoked by 'command'.
    If command is not an absolute path, checks if it can be found in 'path'.
    If command is an absolute path, checks that it exists. Returns 'command'
    if ok, None otherwise.
    """
    assert(isinstance(command, list))
    assert(all(isinstance(c, str) for c in command))
    #FIXME: why do we check the first and last elements????
    head, tail = command[0], command[-1]
    if check_tool_aux(head) or check_tool_aux(tail):
        return command
# ported from trunk@47281
def handle_options(tool, condition, command, options):
    """ Handle common options for toolset, specifically sets the following
    flag variables:
    - CONFIG_COMMAND to 'command'
    - OPTIONS for compile to the value of <compileflags> in options
    - OPTIONS for compile.c to the value of <cflags> in options
    - OPTIONS for compile.c++ to the value of <cxxflags> in options
    - OPTIONS for compile.fortran to the value of <fflags> in options
    - OPTIONS for link to the value of <linkflags> in options
    """
    from b2.build import toolset
    assert(isinstance(tool, str))
    assert(isinstance(condition, list))
    assert(isinstance(command, str))
    assert(isinstance(options, list))
    assert(command)
    toolset.flags(tool, 'CONFIG_COMMAND', condition, [command])
    # Map each sub-tool to the feature whose values feed its OPTIONS flag.
    option_map = (
        ('.compile',         '<compileflags>'),
        ('.compile.c',       '<cflags>'),
        ('.compile.c++',     '<cxxflags>'),
        ('.compile.fortran', '<fflags>'),
        ('.link',            '<linkflags>'),
    )
    for suffix, feature_name in option_map:
        toolset.flags(tool + suffix, 'OPTIONS', condition,
                      feature.get_values(feature_name, options))
# ported from trunk@47281
def get_program_files_dir():
    """ Returns the location of the "program files" directory on a Windows
    platform, falling back to the conventional default when the build
    engine does not provide the ProgramFiles variable.
    """
    program_files = bjam.variable("ProgramFiles")
    if program_files:
        return ' '.join(program_files)
    return "c:\\Program Files"
# ported from trunk@47281
def rm_command():
    # Returns the platform's file-removal command string; the __RM global
    # is assigned by init() below.
    return __RM
# ported from trunk@47281
def copy_command():
    # Returns the platform's file-copy command string; the __CP global
    # is assigned by init() below.
    return __CP
# ported from trunk@47281
def variable_setting_command(variable, value):
    """
    Returns the command needed to set an environment variable on the current
    platform. The variable setting persists through all following commands
    and is visible in the environment seen by subsequently executed commands.
    In other words, on Unix systems the variable is exported, which is
    consistent with the only possible behavior on Windows systems.
    """
    assert(isinstance(variable, str))
    assert(isinstance(value, str))
    if os_name() == 'NT':
        return 'set %s=%s%s' % (variable, value, os.linesep)
    # NOTE: the Jam implementation of this rule had problems on CYGWIN,
    # where the Windows \r\n newline leaked into the generated export
    # command; the Python implementation is believed to emit the correct
    # native line separator ("I think that this works correctly in
    # python -- Steven Watanabe", see history at (07.07.2008.) (Jurko)).
    return '%s=%s%sexport %s%s' % (variable, value, os.linesep,
                                   variable, os.linesep)
def path_variable_setting_command(variable, paths):
    """
    Returns a command that sets a named shell path variable to the given
    NATIVE paths on the current platform.
    """
    assert(isinstance(variable, str))
    assert(isinstance(paths, list))
    # Use os.pathsep (';' on Windows, ':' elsewhere) directly; the original
    # spelled it os.path.pathsep, which only works because the path modules
    # happen to re-export it, and was inconsistent with
    # prepend_path_variable_command.
    return variable_setting_command(variable, os.pathsep.join(paths))
def prepend_path_variable_command(variable, paths):
    """
    Returns a command that prepends the given paths to the named path
    variable on the current platform, preserving the variable's existing
    entries after the new ones.
    """
    assert(isinstance(variable, str))
    assert(isinstance(paths, list))
    current = os.environ.get(variable, "")
    # "".split(os.pathsep) yields [''], which would append an empty entry
    # and leave a dangling separator in the generated command when the
    # variable is unset or empty -- treat that case as "no entries".
    existing = current.split(os.pathsep) if current else []
    return path_variable_setting_command(variable, paths + existing)
def file_creation_command():
    """
    Return a command which can create a file. If 'r' is result of invocation,
    then 'r foobar' will create foobar with unspecified content. What happens
    if the file already exists is unspecified.
    """
    if os_name() == 'NT':
        return "echo. > "
    return "touch "
#FIXME: global variable
# Directories for which a mkdir action has already been scheduled; used by
# mkdir() below to avoid registering the same target twice.
__mkdir_set = set()
# Matches a Windows drive specification such as 'C:'.  The original pattern
# was r'^.*:\$', where the escaped '\$' matches a literal dollar character,
# so drive roots were never recognized; the Jam original is '^(.*:)$'.
__re_windows_drive = re.compile(r'^.*:$')
def mkdir(engine, target):
    """Register a build action that creates directory 'target' and,
    recursively, all of its parent directories."""
    # If dir exists, do not update it. Do this even for $(DOT).
    bjam.call('NOUPDATE', target)
    global __mkdir_set
    # FIXME: Where is DOT defined?
    #if $(<) != $(DOT) && ! $($(<)-mkdir):
    if target != '.' and target not in __mkdir_set:
        # Cheesy gate to prevent multiple invocations on same dir.
        __mkdir_set.add(target)
        # Schedule the mkdir build action.
        if os_name() == 'NT':
            engine.set_update_action("common.MkDir1-quick-fix-for-windows", target, [])
        else:
            engine.set_update_action("common.MkDir1-quick-fix-for-unix", target, [])
        # Prepare a Jam 'dirs' target that can be used to make the build only
        # construct all the target directories.
        engine.add_dependency('dirs', target)
        # Recursively create parent directories. $(<:P) = $(<)'s parent & we
        # recurse until root.
        s = os.path.dirname(target)
        if os_name() == 'NT':
            # Stop the recursion at a bare drive specification (e.g. 'C:').
            if(__re_windows_drive.match(s)):
                s = ''
        if s:
            if s != target:
                # The parent must exist before this directory can be made.
                engine.add_dependency(target, s)
                mkdir(engine, s)
            else:
                # dirname() reached a fixed point (filesystem root).
                bjam.call('NOTFILE', s)
# Matches a dotted version string, capturing major, minor and an optional
# third component, e.g. '1.33.1' -> groups ('1', '33', '1').
__re_version = re.compile(r'^([^.]+)[.]([^.]+)[.]?([^.]*)')
def format_name(format, name, target_type, prop_set):
    """ Given a target, as given to a custom tag rule, returns a string formatted
    according to the passed format. Format is a list of properties that is
    represented in the result. For each element of format the corresponding target
    information is obtained and added to the result string. For all, but the
    literal, the format value is taken as the string to prepend to the output
    to join the item to the rest of the result. If not given "-" is used as a
    joiner.

    The format options can be:

      <base>[joiner]
          :: The basename of the target name.
      <toolset>[joiner]
          :: The abbreviated toolset tag being used to build the target.
      <threading>[joiner]
          :: Indication of a multi-threaded build.
      <runtime>[joiner]
          :: Collective tag of the build runtime.
      <version:/version-feature | X.Y[.Z]/>[joiner]
          :: Short version tag taken from the given "version-feature"
             in the build properties. Or if not present, the literal
             value as the version number.
      <property:/property-name/>[joiner]
          :: Direct lookup of the given property-name value in the
             build properties. /property-name/ is a regular expression.
             e.g. <property:toolset-.*:flavor> will match every toolset.
      /otherwise/
          :: The literal value of the format argument.

    For example this format:

        boost_ <base> <toolset> <threading> <runtime> <version:boost-version>

    Might return:

        boost_thread-vc80-mt-gd-1_33.dll, or
        boost_regex-vc80-gd-1_33.dll

    The returned name also has the target type specific prefix and suffix which
    puts it in a ready form to use as the value from a custom tag rule.
    """
    assert(isinstance(format, list))
    assert(isinstance(name, str))
    # Bugfix: the original wrote 'not type', accidentally testing the b2
    # 'type' module instead of the 'target_type' argument.
    assert(isinstance(target_type, str) or not target_type)
    # assert(isinstance(prop_set, property_set.PropertySet))
    if type.is_derived(target_type, 'LIB'):
        result = ""
        for f in format:
            grist = get_grist(f)
            if grist == '<base>':
                result += os.path.basename(name)
            elif grist == '<toolset>':
                result += join_tag(ungrist(f),
                    toolset_tag(name, target_type, prop_set))
            elif grist == '<threading>':
                result += join_tag(ungrist(f),
                    threading_tag(name, target_type, prop_set))
            elif grist == '<runtime>':
                result += join_tag(ungrist(f),
                    runtime_tag(name, target_type, prop_set))
            elif grist.startswith('<version:'):
                key = grist[len('<version:'):-1]
                # prop_set.get() returns a list of values; use the property
                # value when present, else treat the key as a literal
                # version number.  (The original passed the raw list to the
                # regex, which cannot work.)
                values = prop_set.get('<' + key + '>')
                version = values[0] if values else key
                # Bugfix: match objects are not indexable in Python 2 --
                # use group() to extract the major/minor components.
                match = __re_version.match(version)
                result += join_tag(ungrist(f),
                                   match.group(1) + '_' + match.group(2))
            elif grist.startswith('<property:'):
                key = grist[len('<property:'):-1]
                property_re = re.compile('<(' + key + ')>')
                p0 = None
                for prop in prop_set.raw():
                    m = property_re.match(prop)
                    if m:
                        # Bugfix: m[1] -> m.group(1) (no indexing in Py2).
                        p0 = m.group(1)
                        break
                if p0:
                    p = prop_set.get('<' + p0 + '>')
                    if p:
                        assert(len(p) == 1)
                        # Bugfix: join the single string value, not the list.
                        result += join_tag(ungrist(f), p[0])
            else:
                # Literal format element.
                result += ungrist(f)
        result = virtual_target.add_prefix_and_suffix(
            result, target_type, prop_set)
        return result
def join_tag(joiner, tag):
    """Prepend *joiner* to *tag*, defaulting to '-' when no joiner given."""
    return (joiner or '-') + tag
# Matches a toolset version property such as '<toolset-msvc:version>8.0',
# capturing the major and minor components; used by toolset_tag().
__re_toolset_version = re.compile(r"<toolset.*version>(\d+)[.](\d*)")
def toolset_tag(name, target_type, prop_set):
    """Return the abbreviated toolset tag (e.g. 'vc80', 'gcc', 'mgw') for
    the toolset and toolset version found in the build properties."""
    tag = ''
    properties = prop_set.raw()
    tools = prop_set.get('<toolset>')
    # Bugfix: the original asserted len(tools) == 0, which would always fail
    # whenever a toolset was actually present; a non-empty list is required.
    assert(len(tools) > 0)
    tools = tools[0]
    if tools.startswith('borland'): tag += 'bcb'
    elif tools.startswith('como'): tag += 'como'
    elif tools.startswith('cw'): tag += 'cw'
    elif tools.startswith('darwin'): tag += 'xgcc'
    # Bugfix: 'edg' was a bare name (NameError); it must be a string literal.
    elif tools.startswith('edg'): tag += 'edg'
    elif tools.startswith('gcc'):
        # prop_set.get() returns a list of values (empty when the
        # <toolset-gcc:flavor> property is not set) -- TODO confirm against
        # the PropertySet implementation.
        flavor = prop_set.get('<toolset-gcc:flavor>')
        if flavor and flavor[0].find('mingw') != -1:
            tag += 'mgw'
        else:
            tag += 'gcc'
    elif tools == 'intel':
        if prop_set.get('<toolset-intel:platform>') == ['win']:
            tag += 'iw'
        else:
            tag += 'il'
    elif tools.startswith('kcc'): tag += 'kcc'
    elif tools.startswith('kylix'): tag += 'bck'
    #case metrowerks* : tag += cw ;
    #case mingw* : tag += mgw ;
    elif tools.startswith('mipspro'): tag += 'mp'
    elif tools.startswith('msvc'): tag += 'vc'
    elif tools.startswith('sun'): tag += 'sw'
    elif tools.startswith('tru64cxx'): tag += 'tru'
    elif tools.startswith('vacpp'): tag += 'xlc'
    # Find the toolset version property, if any.  Bugfix: initialize to None
    # so a missing version property no longer causes a NameError below.
    version = None
    for prop in properties:
        match = __re_toolset_version.match(prop)
        if match:
            version = match
            break
    version_string = None
    # For historical reasons, vc6.0 and vc7.0 use different naming.
    if tag == 'vc' and version is not None:
        if version.group(1) == '6':
            # Cancel minor version.
            version_string = '6'
        elif version.group(1) == '7' and version.group(2) == '0':
            version_string = '7'
    # On intel, version is not added, because it does not matter and it's the
    # version of vc used as backend that matters. Ideally, we'd encode the
    # backend version but that would break compatibility with V1.
    elif tag == 'iw':
        version_string = ''
    # On borland, version is not added for compatibility with V1.
    elif tag == 'bcb':
        version_string = ''
    if version_string is None:
        version_string = (version.group(1) + version.group(2)) if version else ''
    # Bugfix: the original only appended the version in the default branch,
    # so e.g. vc6 produced bare 'vc' instead of 'vc6'.
    tag += version_string
    return tag
def threading_tag(name, target_type, prop_set):
    """Return 'mt' when the build properties request a multi-threaded
    build, and an empty string otherwise."""
    return 'mt' if '<threading>multi' in prop_set.raw() else ''
def runtime_tag(name, target_type, prop_set ):
    """Return the collective runtime tag for the build properties
    (static link 's', runtime debug 'g', python debug 'y', debug variant
    'd', STLport 'p', STLport native iostreams 'n')."""
    props = prop_set.raw()
    parts = []
    if '<runtime-link>static' in props:
        parts.append('s')
    # This is an ugly thing. In V1, there's code to automatically detect
    # which properties affect a target, so if <runtime-debugging> does not
    # affect a toolset the tag rules never see it. That has no V2
    # equivalent yet, so only toolsets known to care about runtime debug
    # contribute the 'g' tag.
    cares_about_runtime_debug = ('<toolset>msvc' in props
                                 or '<stdlib>stlport' in props
                                 or '<toolset-intel:platform>win' in props)
    if cares_about_runtime_debug and '<runtime-debugging>on' in props:
        parts.append('g')
    if '<python-debugging>on' in props:
        parts.append('y')
    if '<variant>debug' in props:
        parts.append('d')
    if '<stdlib>stlport' in props:
        parts.append('p')
    if '<stdlib-stlport:iostream>hostios' in props:
        parts.append('n')
    return ''.join(parts)
## TODO:
##rule __test__ ( )
##{
## import assert ;
##
## local nl = "
##" ;
##
## local save-os = [ modules.peek os : .name ] ;
##
## modules.poke os : .name : LINUX ;
##
## assert.result "PATH=foo:bar:baz$(nl)export PATH$(nl)"
## : path-variable-setting-command PATH : foo bar baz ;
##
## assert.result "PATH=foo:bar:$PATH$(nl)export PATH$(nl)"
## : prepend-path-variable-command PATH : foo bar ;
##
## modules.poke os : .name : NT ;
##
## assert.result "set PATH=foo;bar;baz$(nl)"
## : path-variable-setting-command PATH : foo bar baz ;
##
## assert.result "set PATH=foo;bar;%PATH%$(nl)"
## : prepend-path-variable-command PATH : foo bar ;
##
## modules.poke os : .name : $(save-os) ;
##}
def init(manager):
    """Register the common build actions with the engine and initialize
    the platform-specific shell command globals (__RM, __CP, __IGNORE,
    __LN)."""
    engine = manager.engine()
    engine.register_action("common.MkDir1-quick-fix-for-unix", 'mkdir -p "$(<)"')
    engine.register_action("common.MkDir1-quick-fix-for-windows", 'if not exist "$(<)\\" mkdir "$(<)"')
    import b2.tools.make
    import b2.build.alias
    global __RM, __CP, __IGNORE, __LN
    # ported from trunk@47281
    if os_name() == 'NT':
        __RM, __CP, __IGNORE = 'del /f /q', 'copy', '2>nul >nul & setlocal'
        # There is no native hard-link command; fall back to copying.
        __LN = __CP
    else:
        __RM, __CP, __IGNORE, __LN = 'rm -f', 'cp', '', 'ln'
    engine.register_action("common.Clean", __RM + ' "$(>)"',
                           flags=['piecemeal', 'together', 'existing'])
    engine.register_action("common.copy", __CP + ' "$(>)" "$(<)"')
    engine.register_action("common.RmTemps", __RM + ' "$(>)" ' + __IGNORE,
                           flags=['quietly', 'updated', 'piecemeal', 'together'])
    engine.register_action("common.hard-link",
        __RM + ' "$(<)" 2$(NULL_OUT) $(NULL_OUT)' + os.linesep +
        __LN + ' "$(>)" "$(<)" $(NULL_OUT)')
| apache-2.0 |
jtux270/translate | FreeIPA/freeipa-3.0.0/tests/test_xmlrpc/test_pwpolicy_plugin.py | 2 | 9060 | # Authors:
# Rob Crittenden <rcritten@redhat.com>
# Pavel Zuna <pzuna@redhat.com>
#
# Copyright (C) 2010 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test the `ipalib/plugins/pwpolicy.py` module.
"""
import sys
from nose.tools import assert_raises # pylint: disable=E0611
from xmlrpc_test import XMLRPC_test, assert_attr_equal
from ipalib import api
from ipalib import errors
class test_pwpolicy(XMLRPC_test):
    """
    Test the `pwpolicy` plugin.
    """
    # Fixture group/user names; created and torn down by the tests below.
    group = u'testgroup12'
    group2 = u'testgroup22'
    group3 = u'testgroup32'
    user = u'testuser12'
    # Per-group policy keyword sets; cospriority must be unique per policy.
    kw = {'cospriority': 1, 'krbminpwdlife': 30, 'krbmaxpwdlife': 40, 'krbpwdhistorylength': 5, 'krbpwdminlength': 6 }
    kw2 = {'cospriority': 2, 'krbminpwdlife': 40, 'krbmaxpwdlife': 60, 'krbpwdhistorylength': 8, 'krbpwdminlength': 9 }
    kw3 = {'cospriority': 10, 'krbminpwdlife': 50, 'krbmaxpwdlife': 30, 'krbpwdhistorylength': 3, 'krbpwdminlength': 4 }
    global_policy = u'global_policy'

    # NOTE: these tests mutate server state and depend on running in the
    # alphabetical order of their names (test_1 ... test_9, test_a ... test_e);
    # do not reorder or run them in isolation.

    def test_1_pwpolicy_add(self):
        """
        Test adding a per-group policy using the `xmlrpc.pwpolicy_add` method.
        """
        # First set up a group and user that will use this policy
        self.failsafe_add(
            api.Object.group, self.group, description=u'pwpolicy test group',
        )
        self.failsafe_add(
            api.Object.user, self.user, givenname=u'Test', sn=u'User'
        )
        api.Command.group_add_member(self.group, user=self.user)
        entry = api.Command['pwpolicy_add'](self.group, **self.kw)['result']
        assert_attr_equal(entry, 'krbminpwdlife', '30')
        assert_attr_equal(entry, 'krbmaxpwdlife', '40')
        assert_attr_equal(entry, 'krbpwdhistorylength', '5')
        assert_attr_equal(entry, 'krbpwdminlength', '6')
        assert_attr_equal(entry, 'cospriority', '1')

    def test_2_pwpolicy_add(self):
        """
        Add a policy with a already used priority.

        The priority validation is done first, so it's OK that the group
        is the same here.
        """
        try:
            api.Command['pwpolicy_add'](self.group, **self.kw)
        except errors.ValidationError:
            pass
        else:
            assert False

    def test_3_pwpolicy_add(self):
        """
        Add a policy that already exists.
        """
        try:
            # cospriority needs to be unique
            self.kw['cospriority'] = 3
            api.Command['pwpolicy_add'](self.group, **self.kw)
        except errors.DuplicateEntry:
            pass
        else:
            assert False

    def test_4_pwpolicy_add(self):
        """
        Test adding another per-group policy using the `xmlrpc.pwpolicy_add` method.
        """
        self.failsafe_add(
            api.Object.group, self.group2, description=u'pwpolicy test group 2'
        )
        entry = api.Command['pwpolicy_add'](self.group2, **self.kw2)['result']
        assert_attr_equal(entry, 'krbminpwdlife', '40')
        assert_attr_equal(entry, 'krbmaxpwdlife', '60')
        assert_attr_equal(entry, 'krbpwdhistorylength', '8')
        assert_attr_equal(entry, 'krbpwdminlength', '9')
        assert_attr_equal(entry, 'cospriority', '2')

    def test_5_pwpolicy_add(self):
        """
        Add a pwpolicy for a non-existent group
        """
        try:
            api.Command['pwpolicy_add'](u'nopwpolicy', cospriority=1, krbminpwdlife=1)
        except errors.NotFound:
            pass
        else:
            assert False

    def test_6_pwpolicy_show(self):
        """
        Test the `xmlrpc.pwpolicy_show` method with global policy.
        """
        entry = api.Command['pwpolicy_show']()['result']
        # Note that this assumes an unchanged global policy
        assert_attr_equal(entry, 'krbminpwdlife', '1')
        assert_attr_equal(entry, 'krbmaxpwdlife', '90')
        assert_attr_equal(entry, 'krbpwdhistorylength', '0')
        assert_attr_equal(entry, 'krbpwdminlength', '8')

    def test_7_pwpolicy_show(self):
        """
        Test the `xmlrpc.pwpolicy_show` method.
        """
        entry = api.Command['pwpolicy_show'](self.group)['result']
        assert_attr_equal(entry, 'krbminpwdlife', '30')
        assert_attr_equal(entry, 'krbmaxpwdlife', '40')
        assert_attr_equal(entry, 'krbpwdhistorylength', '5')
        assert_attr_equal(entry, 'krbpwdminlength', '6')
        assert_attr_equal(entry, 'cospriority', '1')

    def test_8_pwpolicy_mod(self):
        """
        Test the `xmlrpc.pwpolicy_mod` method for global policy.
        """
        entry = api.Command['pwpolicy_mod'](krbminpwdlife=50)['result']
        assert_attr_equal(entry, 'krbminpwdlife', '50')

        # Great, now change it back
        entry = api.Command['pwpolicy_mod'](krbminpwdlife=1)['result']
        assert_attr_equal(entry, 'krbminpwdlife', '1')

    def test_9_pwpolicy_mod(self):
        """
        Test the `xmlrpc.pwpolicy_mod` method.
        """
        entry = api.Command['pwpolicy_mod'](self.group, krbminpwdlife=50)['result']
        assert_attr_equal(entry, 'krbminpwdlife', '50')

    def test_a_pwpolicy_managed(self):
        """
        Test adding password policy to a managed group.
        """
        try:
            entry = api.Command['pwpolicy_add'](self.user, krbminpwdlife=50, cospriority=2)['result']
        except errors.ManagedPolicyError:
            pass
        else:
            assert False

    def test_b_pwpolicy_add(self):
        """
        Test adding a third per-group policy using the `xmlrpc.pwpolicy_add` method.
        """
        self.failsafe_add(
            api.Object.group, self.group3, description=u'pwpolicy test group 3'
        )
        entry = api.Command['pwpolicy_add'](self.group3, **self.kw3)['result']
        assert_attr_equal(entry, 'krbminpwdlife', '50')
        assert_attr_equal(entry, 'krbmaxpwdlife', '30')
        assert_attr_equal(entry, 'krbpwdhistorylength', '3')
        assert_attr_equal(entry, 'krbpwdminlength', '4')
        assert_attr_equal(entry, 'cospriority', '10')

    def test_c_pwpolicy_find(self):
        """Test that password policies are sorted and reported properly"""
        result = api.Command['pwpolicy_find']()['result']
        assert len(result) == 4

        # Test that policies are sorted in numerical order
        assert result[0]['cn'] == (self.group,)
        assert result[1]['cn'] == (self.group2,)
        assert result[2]['cn'] == (self.group3,)
        assert result[3]['cn'] == ('global_policy',)

        # Test that returned values match the arguments
        # Only test the second and third results; the first one was modified
        for entry, expected in (result[1], self.kw2), (result[2], self.kw3):
            for name, value in expected.iteritems():
                assert_attr_equal(entry, name, str(value))

    def test_c_pwpolicy_find_pkey_only(self):
        """Test that password policies are sorted properly with --pkey-only"""
        result = api.Command['pwpolicy_find'](pkey_only=True)['result']
        assert len(result) == 4
        assert result[0]['cn'] == (self.group,)
        assert result[1]['cn'] == (self.group2,)
        assert result[2]['cn'] == (self.group3,)
        assert result[3]['cn'] == ('global_policy',)

    def test_d_pwpolicy_show(self):
        """Test that deleting a group removes its pwpolicy"""
        api.Command['group_del'](self.group3)
        with assert_raises(errors.NotFound):
            api.Command['pwpolicy_show'](self.group3)

    def test_e_pwpolicy_del(self):
        """
        Test the `xmlrpc.pwpolicy_del` method.
        """
        api.Command['pwpolicy_del'](self.group)
        # Verify that it is gone
        try:
            api.Command['pwpolicy_show'](self.group)
        except errors.NotFound:
            pass
        else:
            assert False

        # Verify that global policy cannot be deleted
        try:
            api.Command['pwpolicy_del'](self.global_policy)
        except errors.ValidationError:
            pass
        else:
            assert False
        try:
            api.Command['pwpolicy_show'](self.global_policy)
        except errors.NotFound:
            assert False

        # Remove the groups we created
        api.Command['group_del'](self.group)
        api.Command['group_del'](self.group2)

        # Remove the user we created
        api.Command['user_del'](self.user)
| gpl-3.0 |
elainexmas/boto | tests/integration/ec2containerservice/__init__.py | 99 | 1122 | # Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
| mit |
rokihi/ObjectDetector | YOLO_tiny_tf.py | 1 | 9863 | import numpy as np
import tensorflow as tf
import cv2
import time
import sys
class YOLO_TF:
fromfile = None
tofile_img = 'test/output.jpg'
tofile_txt = 'test/output.txt'
imshow = True
filewrite_img = False
filewrite_txt = False
disp_console = True
weights_file = 'weights/YOLO_tiny.ckpt'
alpha = 0.1
threshold = 0.1
iou_threshold = 0.5
num_class = 20
num_box = 2
grid_size = 7
classes = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train","tvmonitor"]
w_img = 640
h_img = 480
def __init__(self,argvs = []):
self.argv_parser(argvs)
self.build_networks()
if self.fromfile is not None: self.detect_from_file(self.fromfile)
def argv_parser(self,argvs):
for i in range(1,len(argvs),2):
if argvs[i] == '-fromfile' : self.fromfile = argvs[i+1]
if argvs[i] == '-tofile_img' : self.tofile_img = argvs[i+1] ; self.filewrite_img = True
if argvs[i] == '-tofile_txt' : self.tofile_txt = argvs[i+1] ; self.filewrite_txt = True
if argvs[i] == '-imshow' :
if argvs[i+1] == '1' :self.imshow = True
else : self.imshow = False
if argvs[i] == '-disp_console' :
if argvs[i+1] == '1' :self.disp_console = True
else : self.disp_console = False
def build_networks(self):
if self.disp_console : print "Building YOLO_tiny graph..."
self.x = tf.placeholder('float32',[None,448,448,3])
self.conv_1 = self.conv_layer(1,self.x,16,3,1)
self.pool_2 = self.pooling_layer(2,self.conv_1,2,2)
self.conv_3 = self.conv_layer(3,self.pool_2,32,3,1)
self.pool_4 = self.pooling_layer(4,self.conv_3,2,2)
self.conv_5 = self.conv_layer(5,self.pool_4,64,3,1)
self.pool_6 = self.pooling_layer(6,self.conv_5,2,2)
self.conv_7 = self.conv_layer(7,self.pool_6,128,3,1)
self.pool_8 = self.pooling_layer(8,self.conv_7,2,2)
self.conv_9 = self.conv_layer(9,self.pool_8,256,3,1)
self.pool_10 = self.pooling_layer(10,self.conv_9,2,2)
self.conv_11 = self.conv_layer(11,self.pool_10,512,3,1)
self.pool_12 = self.pooling_layer(12,self.conv_11,2,2)
self.conv_13 = self.conv_layer(13,self.pool_12,1024,3,1)
self.conv_14 = self.conv_layer(14,self.conv_13,1024,3,1)
self.conv_15 = self.conv_layer(15,self.conv_14,1024,3,1)
self.fc_16 = self.fc_layer(16,self.conv_15,256,flat=True,linear=False)
self.fc_17 = self.fc_layer(17,self.fc_16,4096,flat=False,linear=False)
#skip dropout_18
self.fc_19 = self.fc_layer(19,self.fc_17,1470,flat=False,linear=True)
self.sess = tf.Session()
self.sess.run(tf.initialize_all_variables())
self.saver = tf.train.Saver()
self.saver.restore(self.sess,self.weights_file)
if self.disp_console : print "Loading complete!" + '\n'
def conv_layer(self,idx,inputs,filters,size,stride):
channels = inputs.get_shape()[3]
weight = tf.Variable(tf.truncated_normal([size,size,int(channels),filters], stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=[filters]))
pad_size = size//2
pad_mat = np.array([[0,0],[pad_size,pad_size],[pad_size,pad_size],[0,0]])
inputs_pad = tf.pad(inputs,pad_mat)
conv = tf.nn.conv2d(inputs_pad, weight, strides=[1, stride, stride, 1], padding='VALID',name=str(idx)+'_conv')
conv_biased = tf.add(conv,biases,name=str(idx)+'_conv_biased')
if self.disp_console : print ' Layer %d : Type = Conv, Size = %d * %d, Stride = %d, Filters = %d, Input channels = %d' % (idx,size,size,stride,filters,int(channels))
return tf.maximum(self.alpha*conv_biased,conv_biased,name=str(idx)+'_leaky_relu')
def pooling_layer(self,idx,inputs,size,stride):
if self.disp_console : print ' Layer %d : Type = Pool, Size = %d * %d, Stride = %d' % (idx,size,size,stride)
return tf.nn.max_pool(inputs, ksize=[1, size, size, 1],strides=[1, stride, stride, 1], padding='SAME',name=str(idx)+'_pool')
def fc_layer(self,idx,inputs,hiddens,flat = False,linear = False):
input_shape = inputs.get_shape().as_list()
if flat:
dim = input_shape[1]*input_shape[2]*input_shape[3]
inputs_transposed = tf.transpose(inputs,(0,3,1,2))
inputs_processed = tf.reshape(inputs_transposed, [-1,dim])
else:
dim = input_shape[1]
inputs_processed = inputs
weight = tf.Variable(tf.truncated_normal([dim,hiddens], stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=[hiddens]))
if self.disp_console : print ' Layer %d : Type = Full, Hidden = %d, Input dimension = %d, Flat = %d, Activation = %d' % (idx,hiddens,int(dim),int(flat),1-int(linear))
if linear : return tf.add(tf.matmul(inputs_processed,weight),biases,name=str(idx)+'_fc')
ip = tf.add(tf.matmul(inputs_processed,weight),biases)
return tf.maximum(self.alpha*ip,ip,name=str(idx)+'_fc')
def detect_from_cvmat(self,img):
s = time.time()
self.h_img,self.w_img,_ = img.shape
img_resized = cv2.resize(img, (448, 448))
img_RGB = cv2.cvtColor(img_resized,cv2.COLOR_BGR2RGB)
img_resized_np = np.asarray( img_RGB )
inputs = np.zeros((1,448,448,3),dtype='float32')
inputs[0] = (img_resized_np/255.0)*2.0-1.0
in_dict = {self.x: inputs}
net_output = self.sess.run(self.fc_19,feed_dict=in_dict)
self.result = self.interpret_output(net_output[0])
self.show_results(img,self.result)
strtime = str(time.time()-s)
if self.disp_console : print 'Elapsed time : ' + strtime + ' secs' + '\n'
def detect_from_file(self,filename):
if self.disp_console : print 'Detect from ' + filename
img = cv2.imread(filename)
#img = misc.imread(filename)
self.detect_from_cvmat(img)
def detect_from_crop_sample(self):
self.w_img = 640
self.h_img = 420
f = np.array(open('person_crop.txt','r').readlines(),dtype='float32')
inputs = np.zeros((1,448,448,3),dtype='float32')
for c in range(3):
for y in range(448):
for x in range(448):
inputs[0,y,x,c] = f[c*448*448+y*448+x]
in_dict = {self.x: inputs}
net_output = self.sess.run(self.fc_19,feed_dict=in_dict)
self.boxes, self.probs = self.interpret_output(net_output[0])
img = cv2.imread('person.jpg')
self.show_results(self.boxes,img)
def interpret_output(self,output):
probs = np.zeros((7,7,2,20))
class_probs = np.reshape(output[0:980],(7,7,20))
scales = np.reshape(output[980:1078],(7,7,2))
boxes = np.reshape(output[1078:],(7,7,2,4))
offset = np.transpose(np.reshape(np.array([np.arange(7)]*14),(2,7,7)),(1,2,0))
boxes[:,:,:,0] += offset
boxes[:,:,:,1] += np.transpose(offset,(1,0,2))
boxes[:,:,:,0:2] = boxes[:,:,:,0:2] / 7.0
boxes[:,:,:,2] = np.multiply(boxes[:,:,:,2],boxes[:,:,:,2])
boxes[:,:,:,3] = np.multiply(boxes[:,:,:,3],boxes[:,:,:,3])
boxes[:,:,:,0] *= self.w_img
boxes[:,:,:,1] *= self.h_img
boxes[:,:,:,2] *= self.w_img
boxes[:,:,:,3] *= self.h_img
for i in range(2):
for j in range(20):
probs[:,:,i,j] = np.multiply(class_probs[:,:,j],scales[:,:,i])
filter_mat_probs = np.array(probs>=self.threshold,dtype='bool')
filter_mat_boxes = np.nonzero(filter_mat_probs)
boxes_filtered = boxes[filter_mat_boxes[0],filter_mat_boxes[1],filter_mat_boxes[2]]
probs_filtered = probs[filter_mat_probs]
classes_num_filtered = np.argmax(filter_mat_probs,axis=3)[filter_mat_boxes[0],filter_mat_boxes[1],filter_mat_boxes[2]]
argsort = np.array(np.argsort(probs_filtered))[::-1]
boxes_filtered = boxes_filtered[argsort]
probs_filtered = probs_filtered[argsort]
classes_num_filtered = classes_num_filtered[argsort]
for i in range(len(boxes_filtered)):
if probs_filtered[i] == 0 : continue
for j in range(i+1,len(boxes_filtered)):
if self.iou(boxes_filtered[i],boxes_filtered[j]) > self.iou_threshold :
probs_filtered[j] = 0.0
filter_iou = np.array(probs_filtered>0.0,dtype='bool')
boxes_filtered = boxes_filtered[filter_iou]
probs_filtered = probs_filtered[filter_iou]
classes_num_filtered = classes_num_filtered[filter_iou]
result = []
for i in range(len(boxes_filtered)):
result.append([self.classes[classes_num_filtered[i]],boxes_filtered[i][0],boxes_filtered[i][1],boxes_filtered[i][2],boxes_filtered[i][3],probs_filtered[i]])
return result
def show_results(self,img,results):
    """Render, print, and/or save detections produced by interpret_output().

    img     -- BGR image (numpy array) the detections refer to.
    results -- list of [class_name, x, y, w, h, confidence] entries,
               with (x, y) the box center in pixels.

    Depending on the self.disp_console / filewrite_img / filewrite_txt /
    imshow flags, detections are printed, drawn into a copy of the image,
    written to self.tofile_img, and/or appended to self.tofile_txt.

    NOTE(review): the visible caller at the top of this file invokes
    self.show_results(self.boxes, img) -- the opposite argument order
    of this signature. That looks like a bug at the call site; confirm
    against the other detect_* callers before changing either side.
    This file is Python 2 (print statements).
    """
    img_cp = img.copy()
    if self.filewrite_txt :
        # only opened when needed; closed at the bottom under the same flag
        ftxt = open(self.tofile_txt,'w')
    for i in range(len(results)):
        x = int(results[i][1])
        y = int(results[i][2])
        # half-extents: rectangles are drawn around the (x, y) center
        w = int(results[i][3])//2
        h = int(results[i][4])//2
        if self.disp_console : print ' class : ' + results[i][0] + ' , [x,y,w,h]=[' + str(x) + ',' + str(y) + ',' + str(int(results[i][3])) + ',' + str(int(results[i][4]))+'], Confidence = ' + str(results[i][5])
        if self.filewrite_img or self.imshow:
            # box outline, grey label background, then the label text
            cv2.rectangle(img_cp,(x-w,y-h),(x+w,y+h),(0,255,0),2)
            cv2.rectangle(img_cp,(x-w,y-h-20),(x+w,y-h),(125,125,125),-1)
            cv2.putText(img_cp,results[i][0] + ' : %.2f' % results[i][5],(x-w+5,y-h-7),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,0),1)
        if self.filewrite_txt :
            ftxt.write(results[i][0] + ',' + str(x) + ',' + str(y) + ',' + str(w) + ',' + str(h)+',' + str(results[i][5]) + '\n')
    if self.filewrite_img :
        if self.disp_console : print ' image file writed : ' + self.tofile_img
        cv2.imwrite(self.tofile_img,img_cp)
    if self.imshow :
        cv2.imshow('YOLO_tiny detection',img_cp)
        cv2.waitKey(1)
    if self.filewrite_txt :
        if self.disp_console : print ' txt file writed : ' + self.tofile_txt
        ftxt.close()
def iou(self, box1, box2):
    """Intersection-over-union of two boxes given as [x, y, w, h].

    (x, y) is the box center; w and h are full extents. Returns the ratio
    of the overlap area to the union area of the two boxes.
    """
    overlap_w = (min(box1[0] + 0.5 * box1[2], box2[0] + 0.5 * box2[2])
                 - max(box1[0] - 0.5 * box1[2], box2[0] - 0.5 * box2[2]))
    overlap_h = (min(box1[1] + 0.5 * box1[3], box2[1] + 0.5 * box2[3])
                 - max(box1[1] - 0.5 * box1[3], box2[1] - 0.5 * box2[3]))
    if overlap_w < 0 or overlap_h < 0:
        intersection = 0
    else:
        intersection = overlap_w * overlap_h
    union = box1[2] * box1[3] + box2[2] * box2[3] - intersection
    return intersection / union
def training(self):
    """Placeholder for a future training routine; currently does nothing.

    TODO: add training function!
    """
    return None
def main(argvs):
    # Construct the detector from the raw argv list; presumably YOLO_TF's
    # __init__ parses the arguments and runs detection -- confirm against
    # the class definition earlier in this file.
    yolo = YOLO_TF(argvs)
    # keep any OpenCV window responsive/visible for one second
    cv2.waitKey(1000)
# Script entry point: forward the raw command-line arguments unchanged.
if __name__=='__main__':
    main(sys.argv)
| lgpl-3.0 |
youprofit/godot | platform/osx/detect.py | 19 | 2873 |
import os
import sys
def is_active():
    """Tell SCons that this platform module is enabled."""
    return True
def get_name():
    """Human-readable name of this build platform."""
    return "OSX"
def can_build():
    """OSX binaries can only be built when the host itself runs Darwin."""
    return sys.platform == "darwin"
def get_opts():
    """SCons build options specific to the OSX platform.

    Each entry is (name, help text, default value).
    """
    return [('force_64_bits', 'Force 64 bits binary', 'no')]
def get_flags():
    """Build-flag overrides applied when targeting OSX."""
    flags = [
        ('opengl', 'no'),
        ('legacygl', 'yes'),
        ('builtin_zlib', 'no'),
        ('freetype', 'builtin'),  # use the bundled freetype tree
    ]
    return flags
def configure(env):
    """Populate the SCons environment with OSX compiler/linker settings.

    Mutates *env* in place: include paths, optimization level per build
    target, architecture flags, platform defines, frameworks, and the GLSL
    header builders.
    """
    env.Append(CPPPATH=['#platform/osx'])
    # default to a 32-bit build unless explicitly overridden
    if (env["bits"]=="default"):
        env["bits"]="32"
    # optimization/debug flags per build target
    if (env["target"]=="release"):
        env.Append(CCFLAGS=['-O2','-ffast-math','-fomit-frame-pointer','-ftree-vectorize','-msse2'])
    elif (env["target"]=="release_debug"):
        env.Append(CCFLAGS=['-O2','-DDEBUG_ENABLED'])
    elif (env["target"]=="debug"):
        env.Append(CCFLAGS=['-g3', '-Wall','-DDEBUG_ENABLED','-DDEBUG_MEMORY_ENABLED'])
    if (env["freetype"]!="no"):
        env.Append(CCFLAGS=['-DFREETYPE_ENABLED'])
        env.Append(CPPPATH=['#tools/freetype'])
        env.Append(CPPPATH=['#tools/freetype/freetype/include'])
    # pick the target architecture for both compile and link steps
    if (env["bits"]=="64"):
        env.Append(CCFLAGS=['-arch', 'x86_64'])
        env.Append(LINKFLAGS=['-arch', 'x86_64'])
    else:
        env.Append(CCFLAGS=['-arch', 'i386'])
        env.Append(LINKFLAGS=['-arch', 'i386'])
    # env.Append(CPPPATH=['#platform/osx/include/freetype2', '#platform/osx/include'])
    # env.Append(LIBPATH=['#platform/osx/lib'])
    #if env['opengl'] == 'yes':
    #	env.Append(CPPFLAGS=['-DOPENGL_ENABLED','-DGLEW_ENABLED'])
    env.Append(CPPFLAGS=["-DAPPLE_STYLE_KEYS"])
    env.Append(CPPFLAGS=['-DUNIX_ENABLED','-DGLES2_ENABLED','-DGLEW_ENABLED', '-DOSX_ENABLED'])
    env.Append(LIBS=['pthread'])
    #env.Append(CPPFLAGS=['-F/Developer/SDKs/MacOSX10.4u.sdk/System/Library/Frameworks', '-isysroot', '/Developer/SDKs/MacOSX10.4u.sdk', '-mmacosx-version-min=10.4'])
    #env.Append(LINKFLAGS=['-mmacosx-version-min=10.4', '-isysroot', '/Developer/SDKs/MacOSX10.4u.sdk', '-Wl,-syslibroot,/Developer/SDKs/MacOSX10.4u.sdk'])
    env.Append(LINKFLAGS=['-framework', 'Cocoa', '-framework', 'Carbon', '-framework', 'OpenGL', '-framework', 'AGL', '-framework', 'AudioUnit','-lz'])
    # when building with clang, also use clang for C and linking
    if (env["CXX"]=="clang++"):
        env.Append(CPPFLAGS=['-DTYPED_METHOD_BIND'])
        env["CC"]="clang"
        env["LD"]="clang++"
    # colored diagnostics only make sense on an interactive terminal
    if (env["colored"]=="yes"):
        if sys.stdout.isatty():
            env.Append(CPPFLAGS=["-fcolor-diagnostics"])
    import methods
    # builders that turn .glsl sources into generated C headers
    env.Append( BUILDERS = { 'GLSL120' : env.Builder(action = methods.build_legacygl_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
    env.Append( BUILDERS = { 'GLSL' : env.Builder(action = methods.build_glsl_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
    env.Append( BUILDERS = { 'GLSL120GLES' : env.Builder(action = methods.build_gles2_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
    #env.Append( BUILDERS = { 'HLSL9' : env.Builder(action = methods.build_hlsl_dx9_headers, suffix = 'hlsl.h',src_suffix = '.hlsl') } )
| mit |
foreni-packages/hachoir-core | hachoir_core/i18n.py | 86 | 6241 | # -*- coding: UTF-8 -*-
"""
Functions to manage internationalisation (i18n):
- initLocale(): setup locales and install Unicode compatible stdout and
stderr ;
- getTerminalCharset(): guess terminal charset ;
- gettext(text) translate a string to current language. The function always
returns Unicode string. You can also use the alias: _() ;
- ngettext(singular, plural, count): translate a sentence with singular and
plural form. The function always returns Unicode string.
WARNING: Loading this module indirectly calls initLocale() which sets
locale LC_ALL to ''. This is needed to get user preferred locale
settings.
"""
import hachoir_core.config as config
import hachoir_core
import locale
from os import path
import sys
from codecs import BOM_UTF8, BOM_UTF16_LE, BOM_UTF16_BE
def _getTerminalCharset():
    """
    Compute the terminal charset (used by getTerminalCharset() to fill
    its cache).

    Tries, in order: locale.getpreferredencoding(),
    locale.nl_langinfo(locale.CODESET), sys.stdout.encoding, and finally
    falls back to "ASCII".
    """
    probes = (
        locale.getpreferredencoding,
        # deferred so a missing locale.CODESET raises inside the try block
        lambda: locale.nl_langinfo(locale.CODESET),
    )
    for probe in probes:
        try:
            charset = probe()
        except (locale.Error, AttributeError):
            continue
        if charset:
            return charset
    stdout_encoding = getattr(sys.stdout, "encoding", None)
    if stdout_encoding:
        return stdout_encoding
    return "ASCII"
def getTerminalCharset():
    """
    Guess the terminal charset, computing it once and caching the result
    on the function object itself.

    Tries, in order: locale.getpreferredencoding(),
    locale.nl_langinfo(CODESET), sys.stdout.encoding, then "ASCII".

    WARNING: Call initLocale() before calling this function.
    """
    if not hasattr(getTerminalCharset, "value"):
        getTerminalCharset.value = _getTerminalCharset()
    return getTerminalCharset.value
class UnicodeStdout(object):
    """Wrap a byte stream so that unicode text is encoded (with 'replace'
    error handling) to *charset* before being written. Used by initLocale()
    to wrap sys.stdout / sys.stderr. (Python 2: relies on the `unicode`
    builtin.)"""

    def __init__(self, old_device, charset):
        self.device = old_device
        self.charset = charset

    def flush(self):
        """Flush the wrapped stream."""
        self.device.flush()

    def write(self, text):
        """Write *text*, encoding unicode values with the stored charset."""
        if isinstance(text, unicode):
            text = text.encode(self.charset, 'replace')
        self.device.write(text)

    def writelines(self, lines):
        """Write every chunk of *lines* through write()."""
        for chunk in lines:
            self.write(chunk)
def initLocale():
    """Initialize locale settings (LC_ALL from the environment) and, when
    configured, wrap sys.stdout/sys.stderr in UnicodeStdout. Idempotent:
    later calls only return the cached terminal charset.

    Returns the terminal charset name (str).
    """
    # Only initialize locale once
    if initLocale.is_done:
        return getTerminalCharset()
    initLocale.is_done = True
    # Setup locales; a broken locale configuration must not be fatal
    try:
        locale.setlocale(locale.LC_ALL, "")
    except (locale.Error, IOError):
        pass
    # Get the terminal charset
    charset = getTerminalCharset()
    # UnicodeStdout conflicts with the readline module
    if config.unicode_stdout and ('readline' not in sys.modules):
        # Replace stdout and stderr by objects accepting unicode strings
        sys.stdout = UnicodeStdout(sys.stdout, charset)
        sys.stderr = UnicodeStdout(sys.stderr, charset)
    return charset
# one-shot guard flag consumed above
initLocale.is_done = False
def _dummy_gettext(text):
    # Fallback used when gettext is unavailable/disabled: no translation,
    # just guarantee a unicode result (Python 2 `unicode` builtin).
    return unicode(text)
def _dummy_ngettext(singular, plural, count):
    """Fallback ngettext: pick the plural form for count == 0 or |count| > 1,
    the singular form otherwise, without translating. Always returns unicode
    (Python 2 `unicode` builtin)."""
    plural_needed = (abs(count) > 1) or (not count)
    return unicode(plural if plural_needed else singular)
def _initGettext():
    """Set up translation functions for this package.

    Initializes the locale, then returns a (gettext, ngettext) pair of
    unicode-returning callables: the real gettext bindings when the module
    is importable and config.use_i18n is set, the _dummy_* fallbacks
    otherwise.
    """
    charset = initLocale()
    # Try to load gettext module
    if config.use_i18n:
        try:
            import gettext
            ok = True
        except ImportError:
            ok = False
    else:
        ok = False
    # gettext is not available or not needed: use dummy gettext functions
    if not ok:
        return (_dummy_gettext, _dummy_ngettext)
    # Gettext variables
    package = hachoir_core.PACKAGE
    locale_dir = path.join(path.dirname(__file__), "..", "locale")
    # Initialize gettext module
    gettext.bindtextdomain(package, locale_dir)
    gettext.textdomain(package)
    translate = gettext.gettext
    ngettext = gettext.ngettext
    # Wrap the byte-string gettext results so callers always get unicode,
    # decoded with the terminal charset.
    # TODO: translate_unicode lambda function really sucks!
    #   => find native function to do that
    unicode_gettext = lambda text: \
        unicode(translate(text), charset)
    unicode_ngettext = lambda singular, plural, count: \
        unicode(ngettext(singular, plural, count), charset)
    return (unicode_gettext, unicode_ngettext)
# Byte-order marks recognized by guessBytesCharset(), paired with the
# charset name to report when the input starts with that BOM.
UTF_BOMS = (
    (BOM_UTF8, "UTF-8"),
    (BOM_UTF16_LE, "UTF-16-LE"),
    (BOM_UTF16_BE, "UTF-16-BE"),
)
# Set of valid characters for specific charset
CHARSET_CHARACTERS = (
# U+00E0: LATIN SMALL LETTER A WITH GRAVE
(set(u"©®éêè\xE0ç".encode("ISO-8859-1")), "ISO-8859-1"),
(set(u"©®éêè\xE0ç€".encode("ISO-8859-15")), "ISO-8859-15"),
(set(u"©®".encode("MacRoman")), "MacRoman"),
(set(u"εδηιθκμοΡσςυΈί".encode("ISO-8859-7")), "ISO-8859-7"),
)
def guessBytesCharset(bytes, default=None):
    r"""
    Guess the charset of a byte string; returns *default* when no
    candidate matches.

    >>> guessBytesCharset("abc")
    'ASCII'
    >>> guessBytesCharset("\xEF\xBB\xBFabc")
    'UTF-8'
    >>> guessBytesCharset("abc\xC3\xA9")
    'UTF-8'
    >>> guessBytesCharset("File written by Adobe Photoshop\xA8 4.0\0")
    'MacRoman'
    >>> guessBytesCharset("\xE9l\xE9phant")
    'ISO-8859-1'
    >>> guessBytesCharset("100 \xA4")
    'ISO-8859-15'
    >>> guessBytesCharset('Word \xb8\xea\xe4\xef\xf3\xe7 - Microsoft Outlook 97 - \xd1\xf5\xe8\xec\xdf\xf3\xe5\xe9\xf2 e-mail')
    'ISO-8859-7'
    """
    # Check for UTF BOM
    for bom_bytes, charset in UTF_BOMS:
        if bytes.startswith(bom_bytes):
            return charset
    # Pure ASCII?
    try:
        text = unicode(bytes, 'ASCII', 'strict')
        return 'ASCII'
    except UnicodeDecodeError:
        pass
    # Valid UTF-8?
    try:
        text = unicode(bytes, 'UTF-8', 'strict')
        return 'UTF-8'
    except UnicodeDecodeError:
        pass
    # Create a set of non-ASCII characters and match it against the known
    # per-charset character sets, in CHARSET_CHARACTERS priority order
    non_ascii_set = set( byte for byte in bytes if ord(byte) >= 128 )
    for characters, charset in CHARSET_CHARACTERS:
        if characters.issuperset(non_ascii_set):
            return charset
    return default
# Module-level translation helpers: gettext()/ngettext() always return
# unicode; _() is the conventional short alias for gettext().
gettext, ngettext = _initGettext()
_ = gettext
| gpl-2.0 |
ace02000/pyload | module/plugins/accounts/FourSharedCom.py | 3 | 1164 | # -*- coding: utf-8 -*-
from module.plugins.internal.Account import Account
from module.plugins.internal.Plugin import set_cookie
class FourSharedCom(Account):
    __name__    = "FourSharedCom"
    __type__    = "account"
    __version__ = "0.09"
    __status__  = "testing"

    __description__ = """FourShared.com account plugin"""
    __license__     = "GPLv3"
    __authors__     = [("zoidberg", "zoidberg@mujmail.cz"),
                       ("stickell", "l.stickell@yahoo.it")]

    def grab_info(self, user, password, data):
        """Report account status; 4shared support is free mode only for now,
        so premium is always False."""
        return {'premium': False}

    def signin(self, user, password, data):
        """Log into 4shared.com and fail the login when the error page is
        returned. Forces the English UI via the 4langcookie cookie so the
        error-string check below is reliable."""
        set_cookie(self.req.cj, "4shared.com", "4langcookie", "en")

        login_form = {'login'    : user,
                      'password' : password,
                      'remember' : "on",
                      '_remember': "on",
                      'returnTo' : "http://www.4shared.com/account/home.jsp"}
        html = self.load("https://www.4shared.com/web/login", post=login_form)

        if 'Please log in to access your 4shared account' in html:
            self.fail_login()
| gpl-3.0 |
sspreitzer/tahoe-lafs | src/allmydata/immutable/encode.py | 6 | 30138 | # -*- test-case-name: allmydata.test.test_encode -*-
import time
from zope.interface import implements
from twisted.internet import defer
from foolscap.api import fireEventually
from allmydata import uri
from allmydata.storage.server import si_b2a
from allmydata.hashtree import HashTree
from allmydata.util import mathutil, hashutil, base32, log, happinessutil
from allmydata.util.assertutil import _assert, precondition
from allmydata.codec import CRSEncoder
from allmydata.interfaces import IEncoder, IStorageBucketWriter, \
IEncryptedUploadable, IUploadStatus, UploadUnhappinessError
"""
The goal of the encoder is to turn the original file into a series of
'shares'. Each share is going to a 'shareholder' (nominally each shareholder
is a different host, but for small grids there may be overlap). The number
of shares is chosen to hit our reliability goals (more shares on more
machines means more reliability), and is limited by overhead (proportional to
numshares or log(numshares)) and the encoding technology in use (zfec permits
only 256 shares total). It is also constrained by the amount of data
we want to send to each host. For estimating purposes, think of 10 shares
out of which we need 3 to reconstruct the file.
The encoder starts by cutting the original file into segments. All segments
except the last are of equal size. The segment size is chosen to constrain
the memory footprint (which will probably vary between 1x and 4x segment
size) and to constrain the overhead (which will be proportional to
log(number of segments)).
Each segment (A,B,C) is read into memory, encrypted, and encoded into
blocks. The 'share' (say, share #1) that makes it out to a host is a
collection of these blocks (block A1, B1, C1), plus some hash-tree
information necessary to validate the data upon retrieval. Only one segment
is handled at a time: all blocks for segment A are delivered before any
work is begun on segment B.
As blocks are created, we retain the hash of each one. The list of block hashes
for a single share (say, hash(A1), hash(B1), hash(C1)) is used to form the base
of a Merkle hash tree for that share, called the block hash tree.
This hash tree has one terminal leaf per block. The complete block hash
tree is sent to the shareholder after all the data has been sent. At
retrieval time, the decoder will ask for specific pieces of this tree before
asking for blocks, whichever it needs to validate those blocks.
(Note: we don't really need to generate this whole block hash tree
ourselves. It would be sufficient to have the shareholder generate it and
just tell us the root. This gives us an extra level of validation on the
transfer, though, and it is relatively cheap to compute.)
Each of these block hash trees has a root hash. The collection of these
root hashes for all shares are collected into the 'share hash tree', which
has one terminal leaf per share. After sending the blocks and the complete
block hash tree to each shareholder, we send them the portion of the share
hash tree that is necessary to validate their share. The root of the share
hash tree is put into the URI.
"""
class UploadAborted(Exception):
    """Raised by the segment reader when Encoder.abort() was called."""
    pass
# Binary size units (powers of 1024), for readable size expressions.
KiB=1024
MiB=1024*KiB
GiB=1024*MiB
TiB=1024*GiB
PiB=1024*TiB
class Encoder(object):
implements(IEncoder)
def __init__(self, log_parent=None, upload_status=None):
    """Create an Encoder.

    log_parent    -- optional parent log-message number (int) for this
                     encoder's log entries.
    upload_status -- optional object adaptable to IUploadStatus, used for
                     progress reporting.
    """
    object.__init__(self)
    self.uri_extension_data = {}
    self._codec = None
    self._status = None
    if upload_status:
        self._status = IUploadStatus(upload_status)
    precondition(log_parent is None or isinstance(log_parent, int),
                 log_parent)
    self._log_number = log.msg("creating Encoder %s" % self,
                               facility="tahoe.encoder", parent=log_parent)
    self._aborted = False
def __repr__(self):
    """Debug representation, showing the storage-index prefix once known."""
    _missing = object()
    storage_index = getattr(self, "_storage_index", _missing)
    if storage_index is _missing:
        return "<Encoder for unknown storage index>"
    return "<Encoder for %s>" % si_b2a(storage_index)[:5]
def log(self, *args, **kwargs):
    """Emit a log message, defaulting parent and facility to this encoder's
    values when the caller did not supply them."""
    kwargs.setdefault("parent", self._log_number)
    kwargs.setdefault("facility", "tahoe.encoder")
    return log.msg(*args, **kwargs)
def set_encrypted_uploadable(self, uploadable):
    """Attach the IEncryptedUploadable source to this encoder.

    Asynchronously fetches the file size, the encoding parameters, and the
    storage index from the uploadable. Returns a Deferred that fires with
    this Encoder once everything has been recorded.
    """
    eu = self._uploadable = IEncryptedUploadable(uploadable)
    d = eu.get_size()
    def _got_size(size):
        self.log(format="file size: %(size)d", size=size)
        self.file_size = size
    d.addCallback(_got_size)
    d.addCallback(lambda res: eu.get_all_encoding_parameters())
    d.addCallback(self._got_all_encoding_parameters)
    d.addCallback(lambda res: eu.get_storage_index())
    def _done(storage_index):
        self._storage_index = storage_index
        return self
    d.addCallback(_done)
    return d
def _got_all_encoding_parameters(self, params):
    """Record the (k, happy, N, segment_size) parameters, set up the main
    and tail CRS codecs, and seed uri_extension_data with the layout
    fields derived from them."""
    assert not self._codec
    k, happy, n, segsize = params
    self.required_shares = k
    self.servers_of_happiness = happy
    self.num_shares = n
    self.segment_size = segsize
    self.log("got encoding parameters: %d/%d/%d %d" % (k,happy,n, segsize))
    self.log("now setting up codec")

    # segments must split evenly into k input pieces for the codec
    assert self.segment_size % self.required_shares == 0

    self.num_segments = mathutil.div_ceil(self.file_size,
                                          self.segment_size)

    self._codec = CRSEncoder()
    self._codec.set_params(self.segment_size,
                           self.required_shares, self.num_shares)

    data = self.uri_extension_data
    data['codec_name'] = self._codec.get_encoder_type()
    data['codec_params'] = self._codec.get_serialized_params()

    data['size'] = self.file_size
    data['segment_size'] = self.segment_size
    self.share_size = mathutil.div_ceil(self.file_size,
                                        self.required_shares)
    data['num_segments'] = self.num_segments
    data['needed_shares'] = self.required_shares
    data['total_shares'] = self.num_shares

    # the "tail" is the last segment. This segment may or may not be
    # shorter than all other segments. We use the "tail codec" to handle
    # it. If the tail is short, we use a different codec instance. In
    # addition, the tail codec must be fed data which has been padded out
    # to the right size.
    tail_size = self.file_size % self.segment_size
    if not tail_size:
        tail_size = self.segment_size

    # the tail codec is responsible for encoding tail_size bytes
    padded_tail_size = mathutil.next_multiple(tail_size,
                                              self.required_shares)
    self._tail_codec = CRSEncoder()
    self._tail_codec.set_params(padded_tail_size,
                                self.required_shares, self.num_shares)
    data['tail_codec_params'] = self._tail_codec.get_serialized_params()
def _get_share_size(self):
    """Size in bytes of one share: ceil(file_size / k) plus any per-share
    overhead (currently zero; see _compute_overhead)."""
    base_size = mathutil.div_ceil(self.file_size, self.required_shares)
    return base_size + self._compute_overhead()
def _compute_overhead(self):
    """Per-share overhead beyond the erasure-coded data; currently none."""
    return 0
def get_param(self, name):
    """Return one encoding parameter by *name*.

    Valid names: storage_index, share_counts (a (k, happy, N) tuple),
    num_segments, segment_size, block_size, share_size, serialized_params.
    Raises KeyError for any other name. Must not be called before the
    codec has been configured.
    """
    assert self._codec
    getters = {
        "storage_index": lambda: self._storage_index,
        "share_counts": lambda: (self.required_shares,
                                 self.servers_of_happiness,
                                 self.num_shares),
        "num_segments": lambda: self.num_segments,
        "segment_size": lambda: self.segment_size,
        "block_size": lambda: self._codec.get_block_size(),
        "share_size": lambda: self._get_share_size(),
        "serialized_params": lambda: self._codec.get_serialized_params(),
    }
    if name not in getters:
        raise KeyError("unknown parameter name '%s'" % name)
    return getters[name]()
def set_shareholders(self, landlords, servermap):
    """Record where each share will be written.

    landlords -- dict mapping shnum to an IStorageBucketWriter.
    servermap -- dict mapping shnum to a set of server ids.
    Both dicts are defensively copied.
    """
    assert isinstance(landlords, dict)
    for bucket in landlords.itervalues():
        assert IStorageBucketWriter.providedBy(bucket)
    self.landlords = dict(landlords)
    assert isinstance(servermap, dict)
    for serverids in servermap.itervalues():
        assert isinstance(serverids, set)
    self.servermap = dict(servermap)
def start(self):
    """ Returns a Deferred that will fire with the verify cap (an instance of
    uri.CHKFileVerifierURI).

    Drives the whole upload as one Deferred chain: write headers, encode
    and push each segment in order, then the hash trees, the URI
    extension, and finally close every bucket. The chain order matters:
    later steps assume all earlier segment data has been delivered.
    """
    self.log("%s starting" % (self,))
    #paddedsize = self._size + mathutil.pad_size(self._size, self.needed_shares)
    assert self._codec
    self._crypttext_hasher = hashutil.crypttext_hasher()
    self._crypttext_hashes = []
    self.segment_num = 0
    self.block_hashes = [[] for x in range(self.num_shares)]
    # block_hashes[i] is a list that will be accumulated and then send
    # to landlord[i]. This list contains a hash of each segment_share
    # that we sent to that landlord.
    self.share_root_hashes = [None] * self.num_shares

    self._times = {
        "cumulative_encoding": 0.0,
        "cumulative_sending": 0.0,
        "hashes_and_close": 0.0,
        "total_encode_and_push": 0.0,
        }
    self._start_total_timestamp = time.time()

    d = fireEventually()

    d.addCallback(lambda res: self.start_all_shareholders())

    for i in range(self.num_segments-1):
        # note to self: this form doesn't work, because lambda only
        # captures the slot, not the value
        #d.addCallback(lambda res: self.do_segment(i))
        # use this form instead:
        d.addCallback(lambda res, i=i: self._encode_segment(i))
        d.addCallback(self._send_segment, i)
        d.addCallback(self._turn_barrier)
    last_segnum = self.num_segments - 1
    d.addCallback(lambda res: self._encode_tail_segment(last_segnum))
    d.addCallback(self._send_segment, last_segnum)
    d.addCallback(self._turn_barrier)

    d.addCallback(lambda res: self.finish_hashing())

    d.addCallback(lambda res:
                  self.send_crypttext_hash_tree_to_all_shareholders())
    d.addCallback(lambda res: self.send_all_block_hash_trees())
    d.addCallback(lambda res: self.send_all_share_hash_trees())
    d.addCallback(lambda res: self.send_uri_extension_to_all_shareholders())

    d.addCallback(lambda res: self.close_all_shareholders())
    d.addCallbacks(self.done, self.err)
    return d
def set_status(self, status):
    """Forward a human-readable status string to the upload-status
    object, when one was supplied at construction time."""
    if not self._status:
        return
    self._status.set_status(status)
def set_encode_and_push_progress(self, sent_segments=None, extra=0.0):
    """Report encode-and-push progress to the upload-status object.

    The final hash+close phase counts as one extra segment, so progress
    is (sent_segments + extra) / (num_segments + 1). When sent_segments
    is None, all segments are assumed sent and *extra* tracks the
    closing phase.
    """
    if not self._status:
        return
    if sent_segments is None:
        # we treat the final hash+close as an extra segment
        sent_segments = self.num_segments
    progress = float(sent_segments + extra) / (self.num_segments + 1)
    self._status.set_progress(2, progress)
def abort(self):
    """Mark this upload as aborted; only valid after start()."""
    self.log("aborting upload", level=log.UNUSUAL)
    assert self._codec, "don't call abort before start"
    self._aborted = True
    # the next segment read (in _gather_data inside _encode_segment) will
    # raise UploadAborted(), which will bypass the rest of the upload
    # chain. If we've sent the final segment's shares, it's too late to
    # abort. TODO: allow abort any time up to close_all_shareholders.
def _turn_barrier(self, res):
    """Pass *res* through after at least one reactor turn."""
    # putting this method in a Deferred chain imposes a guaranteed
    # reactor turn between the pre- and post- portions of that chain.
    # This can be useful to limit memory consumption: since Deferreds do
    # not do tail recursion, code which uses defer.succeed(result) for
    # consistency will cause objects to live for longer than you might
    # normally expect.
    return fireEventually(res)
def start_all_shareholders(self):
    """Ask every remaining landlord to write its share header; any failure
    removes that shareholder. Returns a gathered DeferredList."""
    self.log("starting shareholders", level=log.NOISY)
    self.set_status("Starting shareholders")
    dl = []
    # iterate over a snapshot: _remove_shareholder may mutate self.landlords
    for shareid in list(self.landlords):
        d = self.landlords[shareid].put_header()
        d.addErrback(self._remove_shareholder, shareid, "start")
        dl.append(d)
    return self._gather_responses(dl)
def _encode_segment(self, segnum):
    """Read, hash, and erasure-code one full-sized segment.

    Returns the Deferred produced by codec.encode(), which fires with
    (shares, shareids). Also appends this segment's crypttext hash to
    self._crypttext_hashes and accumulates encoding time.
    """
    codec = self._codec
    start = time.time()

    # the ICodecEncoder API wants to receive a total of self.segment_size
    # bytes on each encode() call, broken up into a number of
    # identically-sized pieces. Due to the way the codec algorithm works,
    # these pieces need to be the same size as the share which the codec
    # will generate. Therefore we must feed it with input_piece_size that
    # equals the output share size.
    input_piece_size = codec.get_block_size()

    # as a result, the number of input pieces per encode() call will be
    # equal to the number of required shares with which the codec was
    # constructed. You can think of the codec as chopping up a
    # 'segment_size' of data into 'required_shares' shares (not doing any
    # fancy math at all, just doing a split), then creating some number
    # of additional shares which can be substituted if the primary ones
    # are unavailable

    # we read data from the source one segment at a time, and then chop
    # it into 'input_piece_size' pieces before handing it to the codec
    crypttext_segment_hasher = hashutil.crypttext_segment_hasher()

    # memory footprint: we only hold a tiny piece of the plaintext at any
    # given time. We build up a segment's worth of crypttext, then hand
    # it to the encoder. Assuming 3-of-10 encoding (3.3x expansion) and
    # 1MiB max_segment_size, we get a peak memory footprint of 4.3*1MiB =
    # 4.3MiB. Lowering max_segment_size to, say, 100KiB would drop the
    # footprint to 430KiB at the expense of more hash-tree overhead.

    d = self._gather_data(self.required_shares, input_piece_size,
                          crypttext_segment_hasher)
    def _done_gathering(chunks):
        for c in chunks:
            assert len(c) == input_piece_size
        self._crypttext_hashes.append(crypttext_segment_hasher.digest())
        # during this call, we hit 5*segsize memory
        return codec.encode(chunks)
    d.addCallback(_done_gathering)
    def _done(res):
        elapsed = time.time() - start
        self._times["cumulative_encoding"] += elapsed
        return res
    d.addCallback(_done)
    return d
def _encode_tail_segment(self, segnum):
    """Read, hash, and erasure-code the final (possibly short) segment
    using the tail codec; short reads are zero-padded by _gather_data."""
    start = time.time()
    codec = self._tail_codec
    input_piece_size = codec.get_block_size()

    crypttext_segment_hasher = hashutil.crypttext_segment_hasher()

    d = self._gather_data(self.required_shares, input_piece_size,
                          crypttext_segment_hasher, allow_short=True)
    def _done_gathering(chunks):
        for c in chunks:
            # a short trailing chunk will have been padded by
            # _gather_data
            assert len(c) == input_piece_size
        self._crypttext_hashes.append(crypttext_segment_hasher.digest())
        return codec.encode(chunks)
    d.addCallback(_done_gathering)
    def _done(res):
        elapsed = time.time() - start
        self._times["cumulative_encoding"] += elapsed
        return res
    d.addCallback(_done)
    return d
def _gather_data(self, num_chunks, input_chunk_size,
                 crypttext_segment_hasher,
                 allow_short=False):
    """Return a Deferred that will fire when the required number of
    chunks have been read (and hashed and encrypted). The Deferred fires
    with a list of chunks, each of size input_chunk_size.

    Both the per-segment hasher and the whole-file crypttext hasher are
    updated with the data read. With allow_short=True (the tail segment),
    a short read is zero-padded out to the full size. Raises
    UploadAborted when abort() has been called.
    """
    # I originally built this to allow read_encrypted() to behave badly:
    # to let it return more or less data than you asked for. It would
    # stash the leftovers until later, and then recurse until it got
    # enough. I don't think that was actually useful.
    #
    # who defines read_encrypted?
    #  offloaded.LocalCiphertextReader: real disk file: exact
    #  upload.EncryptAnUploadable: Uploadable, but a wrapper that makes
    #    it exact. The return value is a list of 50KiB chunks, to reduce
    #    the memory footprint of the encryption process.
    #  repairer.Repairer: immutable.filenode.CiphertextFileNode: exact
    #
    # This has been redefined to require read_encrypted() to behave like
    # a local file: return exactly the amount requested unless it hits
    # EOF.
    #  -warner

    if self._aborted:
        raise UploadAborted()

    read_size = num_chunks * input_chunk_size
    d = self._uploadable.read_encrypted(read_size, hash_only=False)
    def _got(data):
        assert isinstance(data, (list,tuple))
        # re-check: abort() may have been called while we were reading
        if self._aborted:
            raise UploadAborted()
        data = "".join(data)
        precondition(len(data) <= read_size, len(data), read_size)
        if not allow_short:
            precondition(len(data) == read_size, len(data), read_size)
        crypttext_segment_hasher.update(data)
        self._crypttext_hasher.update(data)
        if allow_short and len(data) < read_size:
            # padding
            data += "\x00" * (read_size - len(data))
        encrypted_pieces = [data[i:i+input_chunk_size]
                            for i in range(0, len(data), input_chunk_size)]
        return encrypted_pieces
    d.addCallback(_got)
    return d
def _send_segment(self, (shares, shareids), segnum):
    """Deliver one encoded segment's blocks to every landlord and record
    each block's hash for the later block hash trees.

    (Python 2 tuple-unpacking parameter: receives the (shares, shareids)
    result of the codec's encode() Deferred.)
    """
    # To generate the URI, we must generate the roothash, so we must
    # generate all shares, even if we aren't actually giving them to
    # anybody. This means that the set of shares we create will be equal
    # to or larger than the set of landlords. If we have any landlord who
    # *doesn't* have a share, that's an error.
    _assert(set(self.landlords.keys()).issubset(set(shareids)),
            shareids=shareids, landlords=self.landlords)
    start = time.time()
    dl = []
    self.set_status("Sending segment %d of %d" % (segnum+1,
                                                  self.num_segments))
    self.set_encode_and_push_progress(segnum)
    lognum = self.log("send_segment(%d)" % segnum, level=log.NOISY)
    for i in range(len(shares)):
        block = shares[i]
        shareid = shareids[i]
        d = self.send_block(shareid, segnum, block, lognum)
        dl.append(d)
        block_hash = hashutil.block_hash(block)
        #from allmydata.util import base32
        #log.msg("creating block (shareid=%d, blocknum=%d) "
        #        "len=%d %r .. %r: %s" %
        #        (shareid, segnum, len(block),
        #         block[:50], block[-50:], base32.b2a(block_hash)))
        self.block_hashes[shareid].append(block_hash)

    dl = self._gather_responses(dl)

    def _logit(res):
        self.log("%s uploaded %s / %s bytes (%d%%) of your file." %
                 (self,
                  self.segment_size*(segnum+1),
                  self.segment_size*self.num_segments,
                  100 * (segnum+1) / self.num_segments,
                  ),
                 level=log.OPERATIONAL)
        elapsed = time.time() - start
        self._times["cumulative_sending"] += elapsed
        return res
    dl.addCallback(_logit)
    return dl
def send_block(self, shareid, segment_num, block, lognum):
    """Send one block to the landlord holding share *shareid*; a no-op
    (immediate success) when that shareholder has already been dropped.
    A delivery failure removes the shareholder."""
    if shareid not in self.landlords:
        return defer.succeed(None)
    sh = self.landlords[shareid]
    lognum2 = self.log("put_block to %s" % self.landlords[shareid],
                       parent=lognum, level=log.NOISY)
    d = sh.put_block(segment_num, block)
    def _done(res):
        self.log("put_block done", parent=lognum2, level=log.NOISY)
        return res
    d.addCallback(_done)
    d.addErrback(self._remove_shareholder, shareid,
                 "segnum=%d" % segment_num)
    return d
def _remove_shareholder(self, why, shareid, where):
    """Errback: drop a failed shareholder and its servermap entry.

    Raises UploadUnhappinessError when the remaining placement no longer
    satisfies servers_of_happiness; otherwise logs and continues (the
    errback consumes the failure by returning None implicitly).
    """
    ln = self.log(format="error while sending %(method)s to shareholder=%(shnum)d",
                  method=where, shnum=shareid,
                  level=log.UNUSUAL, failure=why)
    if shareid in self.landlords:
        self.landlords[shareid].abort()
        peerid = self.landlords[shareid].get_peerid()
        assert peerid
        del self.landlords[shareid]
        self.servermap[shareid].remove(peerid)
        if not self.servermap[shareid]:
            del self.servermap[shareid]
    else:
        # even more UNUSUAL
        self.log("they weren't in our list of landlords", parent=ln,
                 level=log.WEIRD, umid="TQGFRw")
    happiness = happinessutil.servers_of_happiness(self.servermap)
    if happiness < self.servers_of_happiness:
        peerids = set(happinessutil.shares_by_server(self.servermap).keys())
        msg = happinessutil.failure_message(len(peerids),
                                            self.required_shares,
                                            self.servers_of_happiness,
                                            happiness)
        msg = "%s: %s" % (msg, why)
        raise UploadUnhappinessError(msg)
    self.log("but we can still continue with %s shares, we'll be happy "
             "with at least %s" % (happiness,
                                   self.servers_of_happiness),
             parent=ln)
def _gather_responses(self, dl):
    """Combine a list of Deferreds into one that errbacks on the first
    failure, while swallowing UploadUnhappinessError on the individual
    Deferreds so it is not reported as unhandled."""
    d = defer.DeferredList(dl, fireOnOneErrback=True)
    def _eatUploadUnhappinessError(f):
        # all exceptions that occur while talking to a peer are handled
        # in _remove_shareholder. That might raise UploadUnhappinessError,
        # which will cause the DeferredList to errback but which should
        # otherwise be consumed. Allow non-UploadUnhappinessError exceptions
        # to pass through as an unhandled errback. We use this in lieu of
        # consumeErrors=True to allow coding errors to be logged.
        f.trap(UploadUnhappinessError)
        return None
    for d0 in dl:
        d0.addErrback(_eatUploadUnhappinessError)
    return d
def finish_hashing(self):
    """Finalize the whole-file crypttext hash into uri_extension_data and
    close the uploadable; called after the last segment has been sent."""
    self._start_hashing_and_close_timestamp = time.time()
    self.set_status("Finishing hashes")
    self.set_encode_and_push_progress(extra=0.0)
    crypttext_hash = self._crypttext_hasher.digest()
    self.uri_extension_data["crypttext_hash"] = crypttext_hash
    self._uploadable.close()
def send_crypttext_hash_tree_to_all_shareholders(self):
    """Build the Merkle tree over the per-segment crypttext hashes, record
    its root in uri_extension_data, and send the full tree to every
    remaining shareholder."""
    self.log("sending crypttext hash tree", level=log.NOISY)
    self.set_status("Sending Crypttext Hash Tree")
    self.set_encode_and_push_progress(extra=0.3)
    t = HashTree(self._crypttext_hashes)
    all_hashes = list(t)
    # t[0] is the Merkle root; it also goes into the URI extension
    self.uri_extension_data["crypttext_root_hash"] = t[0]
    dl = []
    for shareid in list(self.landlords):
        dl.append(self.send_crypttext_hash_tree(shareid, all_hashes))
    return self._gather_responses(dl)
def send_crypttext_hash_tree(self, shareid, all_hashes):
    """Deliver the full crypttext hash tree to one shareholder. A no-op
    (immediate success) for shares without a live landlord; a delivery
    failure removes the shareholder."""
    writer = self.landlords.get(shareid)
    if writer is None:
        return defer.succeed(None)
    d = writer.put_crypttext_hashes(all_hashes)
    d.addErrback(self._remove_shareholder, shareid, "put_crypttext_hashes")
    return d
def send_all_block_hash_trees(self):
    """Send each share's block hash tree (built from the block hashes
    accumulated while sending segments) to its shareholder."""
    self.log("sending block hash trees", level=log.NOISY)
    self.set_status("Sending Subshare Hash Trees")
    self.set_encode_and_push_progress(extra=0.4)
    dl = []
    for shareid,hashes in enumerate(self.block_hashes):
        # hashes is a list of the hashes of all blocks that were sent
        # to shareholder[shareid].
        dl.append(self.send_one_block_hash_tree(shareid, hashes))
    return self._gather_responses(dl)
def send_one_block_hash_tree(self, shareid, block_hashes):
    """Build the Merkle tree over one share's block hashes, record its
    root (needed for the share hash tree even when the shareholder is
    gone), and send the full tree to the shareholder if still present."""
    t = HashTree(block_hashes)
    all_hashes = list(t)
    # all_hashes[0] is the root hash, == hash(ah[1]+ah[2])
    # all_hashes[1] is the left child, == hash(ah[3]+ah[4])
    # all_hashes[n] == hash(all_hashes[2*n+1] + all_hashes[2*n+2])
    self.share_root_hashes[shareid] = t[0]
    if shareid not in self.landlords:
        return defer.succeed(None)
    sh = self.landlords[shareid]
    d = sh.put_block_hashes(all_hashes)
    d.addErrback(self._remove_shareholder, shareid, "put_block_hashes")
    return d
    def send_all_share_hash_trees(self):
        """Build the share hash tree and send each bucket its needed nodes."""
        # Each bucket gets a set of share hash tree nodes that are needed to validate their
        # share. This includes the share hash itself, but does not include the top-level hash
        # root (which is stored securely in the URI instead).
        self.log("sending all share hash trees", level=log.NOISY)
        self.set_status("Sending Share Hash Trees")
        self.set_encode_and_push_progress(extra=0.6)
        dl = []
        for h in self.share_root_hashes:
            # every per-share block-hash-tree root must be filled in by now
            assert h
        # create the share hash tree
        t = HashTree(self.share_root_hashes)
        # the root of this hash tree goes into our URI
        self.uri_extension_data['share_root_hash'] = t[0]
        # now send just the necessary pieces out to each shareholder
        for i in range(self.num_shares):
            # the HashTree is given a list of leaves: 0,1,2,3..n .
            # These become nodes A+0,A+1,A+2.. of the tree, where A=n-1
            needed_hash_indices = t.needed_hashes(i, include_leaf=True)
            hashes = [(hi, t[hi]) for hi in needed_hash_indices]
            dl.append(self.send_one_share_hash_tree(i, hashes))
        return self._gather_responses(dl)
def send_one_share_hash_tree(self, shareid, needed_hashes):
if shareid not in self.landlords:
return defer.succeed(None)
sh = self.landlords[shareid]
d = sh.put_share_hashes(needed_hashes)
d.addErrback(self._remove_shareholder, shareid, "put_share_hashes")
return d
    def send_uri_extension_to_all_shareholders(self):
        """Pack the URI extension block (UEB) and push it to every bucket.

        Also records self.uri_extension_hash, which later goes into the
        verifier URI.
        """
        lp = self.log("sending uri_extension", level=log.NOISY)
        self.set_status("Sending URI Extensions")
        self.set_encode_and_push_progress(extra=0.8)
        for k in ('crypttext_root_hash', 'crypttext_hash',
                  ):
            assert k in self.uri_extension_data
        uri_extension = uri.pack_extension(self.uri_extension_data)
        ed = {}
        for k,v in self.uri_extension_data.items():
            if k.endswith("hash"):
                # base32-encode hash values so the log line is readable
                ed[k] = base32.b2a(v)
            else:
                ed[k] = v
        self.log("uri_extension_data is %s" % (ed,), level=log.NOISY, parent=lp)
        self.uri_extension_hash = hashutil.uri_extension_hash(uri_extension)
        dl = []
        for shareid in list(self.landlords):
            dl.append(self.send_uri_extension(shareid, uri_extension))
        return self._gather_responses(dl)
def send_uri_extension(self, shareid, uri_extension):
sh = self.landlords[shareid]
d = sh.put_uri_extension(uri_extension)
d.addErrback(self._remove_shareholder, shareid, "put_uri_extension")
return d
def close_all_shareholders(self):
self.log("closing shareholders", level=log.NOISY)
self.set_status("Closing Shareholders")
self.set_encode_and_push_progress(extra=0.9)
dl = []
for shareid in list(self.landlords):
d = self.landlords[shareid].close()
d.addErrback(self._remove_shareholder, shareid, "close")
dl.append(d)
return self._gather_responses(dl)
    def done(self, res):
        """Upload finished: record timings and return the verifier URI."""
        self.log("upload done", level=log.OPERATIONAL)
        self.set_status("Finished")
        self.set_encode_and_push_progress(extra=1.0) # done
        now = time.time()
        h_and_c_elapsed = now - self._start_hashing_and_close_timestamp
        self._times["hashes_and_close"] = h_and_c_elapsed
        total_elapsed = now - self._start_total_timestamp
        self._times["total_encode_and_push"] = total_elapsed
        # update our sharemap
        self._shares_placed = set(self.landlords.keys())
        return uri.CHKFileVerifierURI(self._storage_index, self.uri_extension_hash,
                                      self.required_shares, self.num_shares, self.file_size)
def err(self, f):
self.log("upload failed", failure=f, level=log.UNUSUAL)
self.set_status("Failed")
# we need to abort any remaining shareholders, so they'll delete the
# partial share, allowing someone else to upload it again.
self.log("aborting shareholders", level=log.UNUSUAL)
for shareid in list(self.landlords):
self.landlords[shareid].abort()
if f.check(defer.FirstError):
return f.value.subFailure
return f
    def get_shares_placed(self):
        """Return the set of share numbers that were successfully placed."""
        return self._shares_placed
    def get_times(self):
        """Return the dictionary of encode+push timing measurements."""
        return self._times
    def get_uri_extension_data(self):
        """Return the (unpacked) URI extension block dictionary."""
        return self.uri_extension_data
    def get_uri_extension_hash(self):
        """Return the hash of the packed URI extension block."""
        return self.uri_extension_hash
| gpl-2.0 |
ThiefMaster/indico | indico/modules/search/internal.py | 2 | 4309 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from sqlalchemy.orm import subqueryload, undefer
from indico.core.db.sqlalchemy.protection import ProtectionMode
from indico.core.db.sqlalchemy.util.queries import get_n_matching
from indico.modules.categories import Category
from indico.modules.events import Event
from indico.modules.search.base import IndicoSearchProvider, SearchTarget
from indico.modules.search.result_schemas import CategoryResultSchema, EventResultSchema
from indico.modules.search.schemas import DetailedCategorySchema, HTMLStrippingEventSchema
class InternalSearch(IndicoSearchProvider):
    """Fallback search provider backed directly by the Indico database.

    Only supports searching categories and events by title; all other
    object types yield no results.
    """

    def search(self, query, user=None, page=None, object_types=(), *, admin_override_enabled=False,
               **params):
        """Dispatch to category or event search depending on object_types.

        Returns a dict with 'total' (-1 means "unknown, more may exist"),
        'pagenav' cursors and the serialized 'results'.
        """
        category_id = params.get('category_id')
        if object_types == [SearchTarget.category]:
            pagenav, results = self.search_categories(query, user, page, category_id,
                                                      admin_override_enabled)
        elif object_types == [SearchTarget.event]:
            pagenav, results = self.search_events(query, user, page, category_id,
                                                  admin_override_enabled)
        else:
            pagenav, results = {}, []
        return {
            'total': -1 if results else 0,
            'pagenav': pagenav,
            'results': results,
        }

    def _paginate(self, query, page, column, user, admin_override_enabled):
        """Keyset-paginate `query` on `column`, filtered by access rights.

        `page` is a cursor: falsy for the first page, a positive id to get
        the page *after* that id, a negative id to get the page *before*
        abs(id).  Returns (results, pagenav) where pagenav holds the
        prev/next cursors (negative values signal backwards paging).
        """
        reverse = False
        pagenav = {'prev': None, 'next': None}
        if not page:
            query = query.order_by(column.desc())
        elif page > 0:  # next page
            query = query.filter(column < page).order_by(column.desc())
            # since we asked for a next page we know that a previous page exists
            pagenav['prev'] = -(page - 1)
        elif page < 0:  # prev page
            query = query.filter(column > -page).order_by(column)
            # since we asked for a previous page we know that a next page exists
            pagenav['next'] = -(page - 1)
            reverse = True

        def _can_access(obj):
            # cheap protection-mode check first; full ACL check only if needed
            return (obj.effective_protection_mode == ProtectionMode.public or
                    obj.can_access(user, allow_admin=admin_override_enabled))

        # fetch one extra row to detect whether another page exists
        res = get_n_matching(query, self.RESULTS_PER_PAGE + 1, _can_access, prefetch_factor=20)
        if len(res) > self.RESULTS_PER_PAGE:
            # we queried 1 more so we can see if there are more results available
            del res[self.RESULTS_PER_PAGE:]
            if reverse:
                pagenav['prev'] = -res[-1].id
            else:
                pagenav['next'] = res[-1].id
        if reverse:
            # backwards paging selected ascending; restore display order
            res.reverse()
        return res, pagenav

    def search_categories(self, q, user, page, category_id, admin_override_enabled):
        """Title-search categories, optionally under a given category."""
        query = Category.query if not category_id else Category.get(category_id).deep_children_query
        query = (query
                 .filter(Category.title_matches(q),
                         ~Category.is_deleted)
                 .options(undefer('chain'),
                          undefer(Category.effective_protection_mode),
                          subqueryload(Category.acl_entries)))
        objs, pagenav = self._paginate(query, page, Category.id, user, admin_override_enabled)
        res = DetailedCategorySchema(many=True).dump(objs)
        return pagenav, CategoryResultSchema(many=True).load(res)

    def search_events(self, q, user, page, category_id, admin_override_enabled):
        """Title-search events, optionally restricted to a category chain."""
        filters = [
            Event.title_matches(q),
            ~Event.is_deleted
        ]
        if category_id is not None:
            filters.append(Event.category_chain_overlaps(category_id))
        query = (Event.query
                 .filter(*filters)
                 .options(subqueryload(Event.acl_entries), undefer(Event.effective_protection_mode)))
        objs, pagenav = self._paginate(query, page, Event.id, user, admin_override_enabled)
        res = HTMLStrippingEventSchema(many=True).dump(objs)
        return pagenav, EventResultSchema(many=True).load(res)
| mit |
lyan6/genenetwork2 | wqflask/wqflask/views.py | 1 | 30443 | # -*- coding: utf-8 -*-
#
# Main routing table for GN2
from __future__ import absolute_import, division, print_function
import traceback # for error page
import os # for error gifs
import random # for random error gif
import datetime # for errors
import time # for errors
import sys
import csv
import xlsxwriter
import StringIO # Todo: Use cStringIO?
import gc
import cPickle as pickle
import uuid
import simplejson as json
import yaml
#Switching from Redis to StrictRedis; might cause some issues
import redis
Redis = redis.StrictRedis()
import flask
import base64
import array
import sqlalchemy
from wqflask import app
from flask import g, Response, request, make_response, render_template, send_from_directory, jsonify, redirect
from wqflask import search_results
from wqflask import export_traits
from wqflask import gsearch
from wqflask import update_search_results
from wqflask import docs
from wqflask import news
from wqflask.submit_bnw import get_bnw_input
from base.data_set import DataSet # Used by YAML in marker_regression
from wqflask.show_trait import show_trait
from wqflask.show_trait import export_trait_data
from wqflask.heatmap import heatmap
from wqflask.marker_regression import marker_regression
from wqflask.marker_regression import marker_regression_gn1
from wqflask.network_graph import network_graph
from wqflask.correlation import show_corr_results
from wqflask.correlation_matrix import show_corr_matrix
from wqflask.correlation import corr_scatter_plot
from wqflask.wgcna import wgcna_analysis
from wqflask.ctl import ctl_analysis
#from wqflask.trait_submission import submit_trait
from utility import temp_data
from utility.tools import SQL_URI,TEMPDIR,USE_REDIS,USE_GN_SERVER,GN_SERVER_URL,GN_VERSION,JS_TWITTER_POST_FETCHER_PATH
from utility.helper_functions import get_species_groups
from base import webqtlFormData
from base.webqtlConfig import GENERATED_IMAGE_DIR
from utility.benchmark import Bench
from pprint import pformat as pf
from wqflask import user_manager
from wqflask import collect
from wqflask.database import db_session
import werkzeug
import utility.logger
logger = utility.logger.getLogger(__name__ )
@app.before_request
def connect_db():
    """Ensure a SQLAlchemy engine exists on flask.g before each request."""
    db = getattr(g, '_database', None)
    if db is None:
        logger.debug("Get new database connector")
        g.db = g._database = sqlalchemy.create_engine(SQL_URI)
        logger.debug(g.db)
@app.teardown_appcontext
def shutdown_session(exception=None):
    """Remove the scoped DB session and drop g.db when the context ends."""
    db = getattr(g, '_database', None)
    if db is not None:
        logger.debug("remove db_session")
        db_session.remove()
        g.db = None
#@app.before_request
#def trace_it():
# from wqflask import tracer
# tracer.turn_on()
@app.errorhandler(Exception)
def handle_bad_request(e):
    """Render the friendly error page for any unhandled exception.

    Logs the full traceback, then shows error.html with the stack trace and
    a random animation; a cookie keyed on the error message keeps the same
    animation across refreshes.
    """
    err_msg = str(e)
    logger.error(err_msg)
    logger.error(request.url)
    # get the stack trace once and reuse it (was formatted twice before)
    stack_trace = traceback.format_exc()
    logger.error(stack_trace)
    now = datetime.datetime.utcnow()
    time_str = now.strftime('%l:%M%p UTC %b %d, %Y')
    formatted_lines = [request.url + " ("+time_str+")"] + stack_trace.splitlines()

    # Handle random animations
    # Use a cookie to have one animation on refresh
    animation = request.cookies.get(err_msg[:32])
    if not animation:
        # renamed from 'list', which shadowed the builtin
        gif_names = [fn for fn in os.listdir("./wqflask/static/gif/error") if fn.endswith(".gif")]
        animation = random.choice(gif_names)

    resp = make_response(render_template("error.html", message=err_msg, stack=formatted_lines,
                                         error_image=animation, version=GN_VERSION))
    resp.set_cookie(err_msg[:32], animation)
    return resp
@app.route("/")
def index_page():
    """Render the GN2 home page.

    If ?import_collections=true, first import the anonymous session's
    traits into the logged-in user's account.
    """
    logger.info("Sending index_page")
    # NOTE(review): request.url is logged at ERROR level throughout this
    # module -- presumably for traffic tracing; confirm intent.
    logger.error(request.url)
    params = request.args
    if 'import_collections' in params:
        import_collections = params['import_collections']
        if import_collections == "true":
            g.cookie_session.import_traits_to_user()
    if USE_GN_SERVER:
        # The menu is generated using GN_SERVER
        return render_template("index_page.html", gn_server_url = GN_SERVER_URL, version=GN_VERSION)
    else:
        # Old style static menu (OBSOLETE)
        return render_template("index_page_orig.html", version=GN_VERSION)
@app.route("/tmp/<img_path>")
def tmp_page(img_path):
    """Serve a generated image, inlined as base64 in show_image.html."""
    logger.info("In tmp_page")
    logger.info("img_path:", img_path)
    logger.error(request.url)
    initial_start_vars = request.form
    logger.info("initial_start_vars:", initial_start_vars)
    imgfile = open(GENERATED_IMAGE_DIR + img_path, 'rb')
    imgdata = imgfile.read()
    # Python 2 str.encode("base64"); the encoded bytes are handed to the
    # template as an array of ints.
    imgB64 = imgdata.encode("base64")
    bytesarray = array.array('B', imgB64)
    return render_template("show_image.html",
                           img_base64 = bytesarray )
@app.route("/twitter/<path:filename>")
def twitter(filename):
    """Serve static assets for the Twitter post fetcher JS library."""
    return send_from_directory(JS_TWITTER_POST_FETCHER_PATH, filename)
#@app.route("/data_sharing")
#def data_sharing_page():
# logger.info("In data_sharing")
# fd = webqtlFormData.webqtlFormData(request.args)
# logger.info("1Have fd")
# sharingInfoObject = SharingInfo.SharingInfo(request.args['GN_AccessionId'], None)
# info, htmlfilelist = sharingInfoObject.getBody(infoupdate="")
# logger.info("type(htmlfilelist):", type(htmlfilelist))
# htmlfilelist = htmlfilelist.encode("utf-8")
# #template_vars = SharingInfo.SharingInfo(request.args['GN_AccessionId'], None)
# logger.info("1 Made it to rendering")
# return render_template("data_sharing.html",
# info=info,
# htmlfilelist=htmlfilelist)
@app.route("/search", methods=('GET',))
def search_page():
    """Run a dataset search and render the result (or error) page.

    Results are optionally cached in Redis, keyed on the query args.
    """
    logger.info("in search_page")
    logger.error(request.url)
    if 'info_database' in request.args:
        logger.info("Going to sharing_info_page")
        template_vars = sharing_info_page()
        if template_vars.redirect_url:
            logger.info("Going to redirect")
            return flask.redirect(template_vars.redirect_url)
        else:
            return render_template("data_sharing.html", **template_vars.__dict__)
    else:
        result = None
        if USE_REDIS:
            with Bench("Trying Redis cache"):
                key = "search_results:v1:" + json.dumps(request.args, sort_keys=True)
                logger.debug("key is:", pf(key))
                result = Redis.get(key)
            if result:
                logger.info("Redis cache hit on search results!")
                result = pickle.loads(result)
        else:
            logger.info("Skipping Redis cache (USE_REDIS=False)")

        # NOTE(review): the search below runs unconditionally, so a cache
        # hit above is recomputed and overwritten -- confirm whether the
        # cached value was meant to short-circuit this.
        logger.info("request.args is", request.args)
        the_search = search_results.SearchResultPage(request.args)
        result = the_search.__dict__
        logger.debugf("result", result)

        if USE_REDIS:
            Redis.set(key, pickle.dumps(result, pickle.HIGHEST_PROTOCOL))
            Redis.expire(key, 60*60)

        if result['search_term_exists']:
            return render_template("search_result_page.html", **result)
        else:
            return render_template("search_error.html")
@app.route("/gsearch", methods=('GET',))
def gsearchact():
    """Global search: render gene or phenotype results based on ?type=."""
    logger.error(request.url)
    result = gsearch.GSearch(request.args).__dict__
    # renamed from 'type', which shadowed the builtin
    search_type = request.args['type']
    if search_type == "gene":
        return render_template("gsearch_gene.html", **result)
    elif search_type == "phenotype":
        return render_template("gsearch_pheno.html", **result)
    # NOTE(review): any other type falls through and returns None (Flask
    # turns that into a 500) -- same as the original behavior.
@app.route("/gsearch_updating", methods=('POST',))
def gsearch_updating():
    """Global-search endpoint that returns only the rendered results rows."""
    logger.info("REQUEST ARGS:", request.values)
    logger.error(request.url)
    result = update_search_results.GSearch(request.args).__dict__
    return result['results']
# type = request.args['type']
# if type == "gene":
# return render_template("gsearch_gene_updating.html", **result)
# elif type == "phenotype":
# return render_template("gsearch_pheno.html", **result)
@app.route("/docedit")
def docedit():
    """Open the documentation editor for the entry named by ?entry=."""
    logger.error(request.url)
    entry_doc = docs.Docs(request.args['entry'])
    return render_template("docedit.html", **entry_doc.__dict__)
@app.route('/generated/<filename>')
def generated_file(filename):
    """Serve a file from the generated-images directory."""
    logger.error(request.url)
    return send_from_directory(GENERATED_IMAGE_DIR,filename)
@app.route("/help")
def help():
    """Render the 'help' documentation page."""
    logger.error(request.url)
    help_doc = docs.Docs("help")
    return render_template("docs.html", **help_doc.__dict__)
@app.route("/wgcna_setup", methods=('POST',))
def wcgna_setup():
    """Show the WGCNA parameter form for the selected traits.

    NOTE(review): function name is misspelled (wcgna vs wgcna) but may be
    referenced via url_for -- rename with care.
    """
    logger.info("In wgcna, request.form is:", request.form) # We are going to get additional user input for the analysis
    logger.error(request.url)
    return render_template("wgcna_setup.html", **request.form) # Display them using the template
@app.route("/wgcna_results", methods=('POST',))
def wcgna_results():
    """Run the (R-backed) WGCNA analysis and render its results page."""
    logger.info("In wgcna, request.form is:", request.form)
    logger.error(request.url)
    wgcna = wgcna_analysis.WGCNA()                           # Start R, load the package and pointers and create the analysis
    wgcnaA = wgcna.run_analysis(request.form)                # Start the analysis, a wgcnaA object should be a separate long running thread
    result = wgcna.process_results(wgcnaA)                   # After the analysis is finished store the result
    return render_template("wgcna_results.html", **result)   # Display them using the template
@app.route("/ctl_setup", methods=('POST',))
def ctl_setup():
    """Show the CTL parameter form for the selected traits."""
    logger.info("In ctl, request.form is:", request.form)     # We are going to get additional user input for the analysis
    logger.error(request.url)
    return render_template("ctl_setup.html", **request.form)  # Display them using the template
@app.route("/ctl_results", methods=('POST',))
def ctl_results():
    """Run the (R-backed) CTL analysis and render its results page."""
    logger.info("In ctl, request.form is:", request.form)
    logger.error(request.url)
    ctl = ctl_analysis.CTL()                                 # Start R, load the package and pointers and create the analysis
    ctlA = ctl.run_analysis(request.form)                    # Start the analysis, a ctlA object should be a separate long running thread
    result = ctl.process_results(ctlA)                       # After the analysis is finished store the result
    return render_template("ctl_results.html", **result)     # Display them using the template
@app.route("/news")
def news_route():
    """Render the news page from the News model."""
    news_page = news.News()
    return render_template("news.html", **news_page.__dict__)
@app.route("/references")
def references():
    """Render the static references page."""
    # Previously rendered via docs.Docs("references"); kept for reference:
    # doc = docs.Docs("references")
    # return render_template("docs.html", **doc.__dict__)
    return render_template("reference.html")
@app.route("/intro")
def intro():
    """Render the 'intro' documentation page."""
    intro_doc = docs.Docs("intro")
    return render_template("docs.html", **intro_doc.__dict__)
@app.route("/policies")
def policies():
    """Render the 'policies' documentation page."""
    policies_doc = docs.Docs("policies")
    return render_template("docs.html", **policies_doc.__dict__)
@app.route("/links")
def links():
    """Render the 'links' documentation page."""
    links_doc = docs.Docs("links")
    return render_template("docs.html", **links_doc.__dict__)
@app.route("/environments")
def environments():
    """Render the 'environments' documentation page."""
    env_doc = docs.Docs("environments")
    return render_template("docs.html", **env_doc.__dict__)
@app.route("/submit_trait")
def submit_trait_form():
    """Render the trait-submission form with the species/group choices."""
    logger.error(request.url)
    species_and_groups = get_species_groups()
    return render_template("submit_trait.html", **{'species_and_groups' : species_and_groups, 'gn_server_url' : GN_SERVER_URL, 'version' : GN_VERSION})
@app.route("/create_temp_trait", methods=('POST',))
def create_temp_trait():
    """Handle a trait submission.

    NOTE(review): currently a stub -- the SubmitTrait call is commented out
    and the 'links' docs page is rendered instead; confirm intended behavior.
    """
    logger.error(request.url)
    print("REQUEST.FORM:", request.form)
    #template_vars = submit_trait.SubmitTrait(request.form)
    doc = docs.Docs("links")
    return render_template("links.html", **doc.__dict__)
    #return render_template("show_trait.html", **template_vars.__dict__)
@app.route('/export_trait_excel', methods=('POST',))
def export_trait_excel():
    """Excel file consisting of the sample data from the trait data and analysis page"""
    logger.info("In export_trait_excel")
    logger.info("request.form:", request.form)
    logger.error(request.url)
    sample_data = export_trait_data.export_sample_table(request.form)

    logger.info("sample_data - type: %s -- size: %s" % (type(sample_data), len(sample_data)))

    buff = StringIO.StringIO()
    workbook = xlsxwriter.Workbook(buff, {'in_memory': True})
    worksheet = workbook.add_worksheet()
    for i, row in enumerate(sample_data):
        # write_row generalizes the old cell-by-cell code: identical output
        # for 2- and 3-column rows, and wider rows are no longer silently
        # truncated to three columns.
        worksheet.write_row(i, 0, row)
    workbook.close()
    excel_data = buff.getvalue()
    buff.close()

    return Response(excel_data,
                    mimetype='application/vnd.ms-excel',
                    headers={"Content-Disposition":"attachment;filename=sample_data.xlsx"})
@app.route('/export_trait_csv', methods=('POST',))
def export_trait_csv():
    """CSV file consisting of the sample data from the trait data and analysis page"""
    logger.info("In export_trait_csv")
    logger.info("request.form:", request.form)
    logger.error(request.url)
    sample_data = export_trait_data.export_sample_table(request.form)

    logger.info("sample_data - type: %s -- size: %s" % (type(sample_data), len(sample_data)))

    buff = StringIO.StringIO()
    # writerows is equivalent to writing each row in a loop
    csv.writer(buff).writerows(sample_data)
    csv_data = buff.getvalue()
    buff.close()

    return Response(csv_data,
                    mimetype='text/csv',
                    headers={"Content-Disposition":"attachment;filename=sample_data.csv"})
@app.route('/export_traits_csv', methods=('POST',))
def export_traits_csv():
    """CSV file consisting of the traits from the search result page"""
    logger.info("In export_traits_csv")
    logger.info("request.form:", request.form)
    logger.error(request.url)
    csv_data = export_traits.export_search_results_csv(request.form)

    return Response(csv_data,
                    mimetype='text/csv',
                    headers={"Content-Disposition":"attachment;filename=trait_list.csv"})
@app.route('/export_perm_data', methods=('POST',))
def export_perm_data():
    """CSV file consisting of the permutation data for the mapping results"""
    logger.error(request.url)
    num_perm = float(request.form['num_perm'])
    perm_data = json.loads(request.form['perm_results'])

    buff = StringIO.StringIO()
    writer = csv.writer(buff)
    # The 0.37/0.95/0.99 quantiles index the suggestive/significant
    # thresholds -- assumes perm_data is sorted ascending; TODO confirm
    # sortedness is guaranteed upstream.
    writer.writerow(["Suggestive LRS (p=0.63) = " + str(perm_data[int(num_perm*0.37-1)])])
    writer.writerow(["Significant LRS (p=0.05) = " + str(perm_data[int(num_perm*0.95-1)])])
    writer.writerow(["Highly Significant LRS (p=0.01) = " + str(perm_data[int(num_perm*0.99-1)])])
    # writerow("") iterates an empty string, emitting a blank line
    writer.writerow("")
    writer.writerow([str(num_perm) + " Permutations"])
    writer.writerow("")
    for item in perm_data:
        writer.writerow([item])
    csv_data = buff.getvalue()
    buff.close()

    return Response(csv_data,
                    mimetype='text/csv',
                    headers={"Content-Disposition":"attachment;filename=perm_data.csv"})
@app.route("/show_temp_trait", methods=('POST',))
def show_temp_trait_page():
    """Render the trait data & analysis page for a temporary (POSTed) trait."""
    logger.error(request.url)
    template_vars = show_trait.ShowTrait(request.form)
    #logger.info("js_data before dump:", template_vars.js_data)
    template_vars.js_data = json.dumps(template_vars.js_data,
                                       default=json_default_handler,
                                       indent="   ")
    # Sorting the keys messes up the ordered dictionary, so don't do that
    #sort_keys=True)
    #logger.info("js_data after dump:", template_vars.js_data)
    #logger.info("show_trait template_vars:", pf(template_vars.__dict__))
    return render_template("show_trait.html", **template_vars.__dict__)
@app.route("/show_trait")
def show_trait_page():
    """Render the trait data & analysis page (GET variant of show_temp_trait)."""
    logger.error(request.url)
    template_vars = show_trait.ShowTrait(request.args)
    #logger.info("js_data before dump:", template_vars.js_data)
    template_vars.js_data = json.dumps(template_vars.js_data,
                                       default=json_default_handler,
                                       indent="   ")
    # Sorting the keys messes up the ordered dictionary, so don't do that
    #sort_keys=True)
    #logger.info("js_data after dump:", template_vars.js_data)
    #logger.info("show_trait template_vars:", pf(template_vars.__dict__))
    return render_template("show_trait.html", **template_vars.__dict__)
@app.route("/heatmap", methods=('POST',))
def heatmap_page():
    """Build (or fetch from Redis) and render a heatmap for a trait list."""
    logger.info("In heatmap, request.form is:", pf(request.form))
    logger.error(request.url)
    start_vars = request.form
    temp_uuid = uuid.uuid4()

    traits = [trait.strip() for trait in start_vars['trait_list'].split(',')]
    if traits[0] != "":
        version = "v5"
        key = "heatmap:{}:".format(version) + json.dumps(start_vars, sort_keys=True)
        logger.info("key is:", pf(key))
        with Bench("Loading cache"):
            result = Redis.get(key)

        if result:
            logger.info("Cache hit!!!")
            with Bench("Loading results"):
                result = pickle.loads(result)
        else:
            logger.info("Cache miss!!!")
            # compute, then cache the full template context for an hour
            template_vars = heatmap.Heatmap(request.form, temp_uuid)
            template_vars.js_data = json.dumps(template_vars.js_data,
                                               default=json_default_handler,
                                               indent="   ")
            result = template_vars.__dict__

            for item in template_vars.__dict__.keys():
                logger.info("  ---**--- {}: {}".format(type(template_vars.__dict__[item]), item))

            pickled_result = pickle.dumps(result, pickle.HIGHEST_PROTOCOL)
            logger.info("pickled result length:", len(pickled_result))
            Redis.set(key, pickled_result)
            Redis.expire(key, 60*60)

        with Bench("Rendering template"):
            rendered_template = render_template("heatmap.html", **result)

    else:
        rendered_template = render_template("empty_collection.html", **{'tool':'Heatmap'})

    return rendered_template
@app.route("/mapping_results_container")
def mapping_results_container_page():
    """Render the empty container page into which mapping results load."""
    return render_template("mapping_results_container.html")
@app.route("/loading", methods=('POST',))
def loading_page():
    """Show the 'loading' page that re-POSTs the mapping form.

    Copies only the whitelisted keys (plus any 'value:*' sample fields)
    from the submitted form into the template context.
    """
    logger.error(request.url)
    initial_start_vars = request.form
    logger.debug("Marker regression called with initial_start_vars:", initial_start_vars.items())
    #temp_uuid = initial_start_vars['temp_uuid']
    wanted = (
        'temp_uuid',
        'trait_id',
        'dataset',
        'method',
        'trimmed_markers',
        'selected_chr',
        'chromosomes',
        'mapping_scale',
        'score_type',
        'suggestive',
        'significant',
        'num_perm',
        'permCheck',
        'perm_output',
        'num_bootstrap',
        'bootCheck',
        'bootstrap_results',
        'LRSCheck',
        'covariates',
        'maf',
        'use_loco',
        'manhattan_plot',
        'control_marker',
        'control_marker_db',
        'do_control',
        'genofile',
        'pair_scan',
        'startMb',
        'endMb',
        'graphWidth',
        'lrsMax',
        'additiveCheck',
        'showSNP',
        'showGenes',
        'viewLegend',
        'haplotypeAnalystCheck',
        'mapmethod_rqtl_geno',
        'mapmodel_rqtl_geno'
    )

    start_vars_container = {}
    start_vars = {}
    for key, value in initial_start_vars.iteritems():
        # startswith(('value:')) is just startswith('value:') -- the parens
        # do not make a tuple here
        if key in wanted or key.startswith(('value:')):
            start_vars[key] = value

    start_vars_container['start_vars'] = start_vars
    rendered_template = render_template("loading.html", **start_vars_container)

    return rendered_template
@app.route("/marker_regression", methods=('POST',))
def marker_regression_page():
    """Run a QTL mapping (marker regression) and render the results page."""
    initial_start_vars = request.form
    logger.debug("Marker regression called with initial_start_vars:", initial_start_vars.items())
    logger.error(request.url)
    temp_uuid = initial_start_vars['temp_uuid']
    # Whitelist of form keys that are passed on to the mapping code.
    wanted = (
        'trait_id',
        'dataset',
        'geno_db_exists',
        'method',
        'mapping_results_path',
        'trimmed_markers',
        'selected_chr',
        'chromosomes',
        'mapping_scale',
        'plotScale',
        'score_type',
        'suggestive',
        'significant',
        'num_perm',
        'permCheck',
        'perm_output',
        'num_bootstrap',
        'bootCheck',
        'bootstrap_results',
        'LRSCheck',
        'covariates',
        'maf',
        'use_loco',
        'manhattan_plot',
        'control_marker',
        'control_marker_db',
        'do_control',
        'genofile',
        'pair_scan',
        'startMb',
        'endMb',
        'graphWidth',
        'lrsMax',
        'additiveCheck',
        'showSNP',
        'showGenes',
        'viewLegend',
        'haplotypeAnalystCheck',
        'mapmethod_rqtl_geno',
        'mapmodel_rqtl_geno'
    )
    start_vars = {}
    for key, value in initial_start_vars.iteritems():
        if key in wanted or key.startswith(('value:')):
            start_vars[key] = value
    logger.debug("Marker regression called with start_vars:", start_vars)

    version = "v3"
    key = "marker_regression:{}:".format(version) + json.dumps(start_vars, sort_keys=True)
    logger.info("key is:", pf(key))
    with Bench("Loading cache"):
        result = None # Just for testing
        #result = Redis.get(key)

    #logger.info("************************ Starting result *****************")
    #logger.info("result is [{}]: {}".format(type(result), result))
    #logger.info("************************ Ending result ********************")

    # NOTE(review): Redis caching is disabled above (result is always None),
    # so the else-branch below always runs.  If caching is re-enabled, the
    # cache-hit path never assigns rendered_template -- confirm before use.
    if result:
        logger.info("Cache hit!!!")
        with Bench("Loading results"):
            result = pickle.loads(result)
    else:
        logger.info("Cache miss!!!")
        with Bench("Total time in MarkerRegression"):
            template_vars = marker_regression.MarkerRegression(start_vars, temp_uuid)
            if template_vars.mapping_method != "gemma" and template_vars.mapping_method != "plink":
                template_vars.js_data = json.dumps(template_vars.js_data,
                                                   default=json_default_handler,
                                                   indent="   ")
            result = template_vars.__dict__

        if result['pair_scan']:
            with Bench("Rendering template"):
                img_path = result['pair_scan_filename']
                logger.info("img_path:", img_path)
                initial_start_vars = request.form
                logger.info("initial_start_vars:", initial_start_vars)
                imgfile = open(TEMPDIR + img_path, 'rb')
                imgdata = imgfile.read()
                imgB64 = imgdata.encode("base64")
                bytesarray = array.array('B', imgB64)
                result['pair_scan_array'] = bytesarray
                rendered_template = render_template("pair_scan_results.html", **result)
        else:
            #for item in template_vars.__dict__.keys():
            #    logger.info("  ---**--- {}: {}".format(type(template_vars.__dict__[item]), item))

            gn1_template_vars = marker_regression_gn1.MarkerRegression(result).__dict__

            #pickled_result = pickle.dumps(result, pickle.HIGHEST_PROTOCOL)
            #logger.info("pickled result length:", len(pickled_result))
            #Redis.set(key, pickled_result)
            #Redis.expire(key, 1*60)

            with Bench("Rendering template"):
                if (gn1_template_vars['mapping_method'] == "gemma") or (gn1_template_vars['mapping_method'] == "plink"):
                    gn1_template_vars.pop('qtlresults', None)
                print("TEMPLATE KEYS:", list(gn1_template_vars.keys()))
                rendered_template = render_template("marker_regression_gn1.html", **gn1_template_vars)

    # with Bench("Rendering template"):
    #     if result['pair_scan'] == True:
    #         img_path = result['pair_scan_filename']
    #         logger.info("img_path:", img_path)
    #         initial_start_vars = request.form
    #         logger.info("initial_start_vars:", initial_start_vars)
    #         imgfile = open(TEMPDIR + '/' + img_path, 'rb')
    #         imgdata = imgfile.read()
    #         imgB64 = imgdata.encode("base64")
    #         bytesarray = array.array('B', imgB64)
    #         result['pair_scan_array'] = bytesarray
    #         rendered_template = render_template("pair_scan_results.html", **result)
    #     else:
    #         rendered_template = render_template("marker_regression.html", **result)
    #         rendered_template = render_template("marker_regression_gn1.html", **gn1_template_vars)

    return rendered_template
@app.route("/export_mapping_results", methods = ('POST',))
def export_mapping_results():
    """Send a previously generated mapping-results CSV back as a download."""
    logger.info("request.form:", request.form)
    logger.error(request.url)
    file_path = request.form.get("results_path")
    # SECURITY: file_path comes straight from the client; it should be
    # validated against the expected results directory before opening.
    # 'with' ensures the file handle is closed (it previously leaked).
    with open(file_path, "r") as results_file:
        results_csv = results_file.read()
    response = Response(results_csv,
                        mimetype='text/csv',
                        headers={"Content-Disposition":"attachment;filename=mapping_results.csv"})

    return response
@app.route("/export", methods = ('POST',))
def export():
    """Return posted SVG (e.g. a manhattan plot) as an SVG download."""
    logger.info("request.form:", request.form)
    logger.error(request.url)
    svg_xml = request.form.get("data", "Invalid data")
    filename = request.form.get("filename", "manhattan_plot_snp")
    response = Response(svg_xml, mimetype="image/svg+xml")
    response.headers["Content-Disposition"] = "attachment; filename=%s"%filename
    return response
@app.route("/export_pdf", methods = ('POST',))
def export_pdf():
    """Convert a posted SVG mapping plot to PDF and return it as a download."""
    import cairosvg
    logger.info("request.form:", request.form)
    logger.error(request.url)
    svg_xml = request.form.get("data", "Invalid data")
    logger.info("svg_xml:", svg_xml)
    filename = request.form.get("filename", "interval_map_pdf")
    # (removed unused 'filepath' local -- nothing was ever written to disk)
    pdf_file = cairosvg.svg2pdf(bytestring=svg_xml)
    response = Response(pdf_file, mimetype="application/pdf")
    response.headers["Content-Disposition"] = "attachment; filename=%s"%filename
    return response
@app.route("/network_graph", methods=('POST',))
def network_graph_page():
    """Render the network graph for a collection of traits."""
    logger.info("In network_graph, request.form is:", pf(request.form))
    logger.error(request.url)
    start_vars = request.form
    traits = [trait.strip() for trait in start_vars['trait_list'].split(',')]
    if traits[0] != "":
        template_vars = network_graph.NetworkGraph(start_vars)
        template_vars.js_data = json.dumps(template_vars.js_data,
                                           default=json_default_handler,
                                           indent="   ")

        return render_template("network_graph.html", **template_vars.__dict__)
    else:
        return render_template("empty_collection.html", **{'tool':'Network Graph'})
@app.route("/corr_compute", methods=('POST',))
def corr_compute_page():
    """Compute and render trait correlation results."""
    logger.info("In corr_compute, request.form is:", pf(request.form))
    logger.error(request.url)
    #fd = webqtlFormData.webqtlFormData(request.form)
    template_vars = show_corr_results.CorrelationResults(request.form)
    return render_template("correlation_page.html", **template_vars.__dict__)
@app.route("/corr_matrix", methods=('POST',))
def corr_matrix_page():
    """Render the correlation matrix for a collection of traits."""
    logger.info("In corr_matrix, request.form is:", pf(request.form))
    logger.error(request.url)
    start_vars = request.form
    traits = [trait.strip() for trait in start_vars['trait_list'].split(',')]
    if traits[0] != "":
        template_vars = show_corr_matrix.CorrelationMatrix(start_vars)
        template_vars.js_data = json.dumps(template_vars.js_data,
                                           default=json_default_handler,
                                           indent="   ")

        return render_template("correlation_matrix.html", **template_vars.__dict__)
    else:
        return render_template("empty_collection.html", **{'tool':'Correlation Matrix'})
@app.route("/corr_scatter_plot")
def corr_scatter_plot_page():
    """Render a scatter plot of two correlated traits."""
    logger.error(request.url)
    template_vars = corr_scatter_plot.CorrScatterPlot(request.args)
    template_vars.js_data = json.dumps(template_vars.js_data,
                                       default=json_default_handler,
                                       indent="   ")
    return render_template("corr_scatterplot.html", **template_vars.__dict__)
@app.route("/submit_bnw", methods=('POST',))
def submit_bnw():
    """Process a Bayesian Network Webserver (BNW) submission."""
    logger.error(request.url)
    # called for its side effects; the returned value was never used
    get_bnw_input(request.form)
    # NOTE(review): the rendered tool name says 'Correlation Matrix' -- this
    # looks copy/pasted from corr_matrix_page; confirm the intended template
    # and arguments before changing.
    return render_template("empty_collection.html", **{'tool':'Correlation Matrix'})
# Todo: Can we simplify this? -Sam
def sharing_info_page():
    """Info page displayed when the user clicks the "Info" button next to the dataset selection"""
    logger.info("In sharing_info_page")
    logger.error(request.url)
    form_data = webqtlFormData.webqtlFormData(request.args)
    return SharingInfoPage.SharingInfoPage(form_data)
# Take this out or secure it before putting into production
@app.route("/get_temp_data")
def get_temp_data():
    """Return all temporary data stored under the 'key' query parameter as JSON."""
    logger.error(request.url)
    return flask.jsonify(temp_data.TempData(request.args['key']).get_all())
##########################################################################
def json_default_handler(obj):
    '''Fallback serializer for json.dumps; based on http://stackoverflow.com/a/2680060/1175849.

    Handles datetime-like objects (via isoformat), integers (as their string
    form, for use as dictionary keys), and arbitrary objects carrying a
    __dict__.  Anything else raises TypeError like the stock encoder.
    '''
    # Handle datestamps
    if hasattr(obj, 'isoformat'):
        return obj.isoformat()
    # Handle integer keys for dictionaries
    elif isinstance(obj, int):
        # Bug fix: this previously returned str(int) -- the repr of the *type*
        # object -- instead of the string form of the value itself.
        return str(obj)
    # Handle custom objects
    if hasattr(obj, '__dict__'):
        return obj.__dict__
    #elif type(obj) == "Dataset":
    #    logger.info("Not going to serialize Dataset")
    #    return None
    else:
        # Call-style raise keeps this file runnable under both py2 and py3.
        raise TypeError('Object of type %s with value of %s is not JSON serializable' % (
            type(obj), repr(obj)))
| agpl-3.0 |
hmpf/nav | python/nav/topology/detector.py | 2 | 5910 | #
# Copyright (C) 2011, 2012 Uninett AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details. You should have received a copy of the GNU General Public License
# along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""NAV network topology detection program"""
from __future__ import print_function
from argparse import ArgumentParser
from functools import wraps
import inspect
import logging
import sys
import atexit
from django.db.models import Q
import django.db
from nav import buildconf
from nav import daemon
from nav.debug import log_stacktrace, log_last_django_query
from nav.logs import init_generic_logging
from nav.topology.layer2 import update_layer2_topology
from nav.topology.analyze import (AdjacencyReducer, build_candidate_graph_from_db,
get_aggregate_mapping)
from nav.topology.vlan import VlanGraphAnalyzer, VlanTopologyUpdater
from nav.models.manage import Vlan, Prefix
LOG_FILE = 'navtopology.log'
PID_FILE = 'navtopology.pid'
_logger = logging.getLogger(__name__)
def main():
    """Program entry point: parse options and run the requested detections."""
    options = make_option_parser().parse_args()
    init_generic_logging(
        logfile=LOG_FILE,
        stderr=options.stderr,
        stdout=True,
        read_config=True,
    )
    if options.l2 or options.vlan:
        # protect against multiple invocations of long-running jobs
        verify_singleton()
    if options.l2:
        do_layer2_detection()
    if options.vlan:
        vlans = [int(v) for v in options.include_vlans] if options.include_vlans else []
        do_vlan_detection(vlans)
        delete_unused_prefixes()
        delete_unused_vlans()
def int_list(value):
    """Parse a comma-separated string of integers into a list (argparse type)."""
    return list(map(int, value.split(",")))
def make_option_parser():
    """Build and return the command line argument parser for navtopology."""
    parser = ArgumentParser(
        description=("Detects and updates the network topology in your NAV "
                     "database"))
    parser.add_argument(
        '--version', action='version', version='NAV ' + buildconf.VERSION)
    parser.add_argument(
        "--l2", action="store_true", help="Detect physical topology")
    parser.add_argument(
        "--vlan", action="store_true", help="Detect vlan subtopologies")
    parser.add_argument(
        "-i", dest="include_vlans", type=int_list, metavar="vlan[,...]",
        help="Only analyze the VLANs included in this list")
    parser.add_argument(
        "-s", "--stderr", action="store_true",
        help="Log to stderr (even if not a tty)")
    return parser
def with_exception_logging(func):
    """Decorates a function to log unhandled exceptions before re-raising them."""
    @wraps(func)
    def _wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            # Capture the traceback frames first, before any logging call can
            # disturb the exception context.  Drop this wrapper's own frame.
            stack = inspect.trace()[1:]
            logger = logging.getLogger(__name__)
            logger.exception("An unhandled exception occurred")
            log_last_django_query(logger)
            log_stacktrace(logging.getLogger('nav.topology.stacktrace'), stack)
            raise
    return _wrapper
@with_exception_logging
def do_layer2_detection():
    """Detect and update layer 2 topology"""
    candidates = build_candidate_graph_from_db()
    reducer = AdjacencyReducer(candidates,
                               get_aggregate_mapping(include_stacks=True))
    reducer.reduce()
    update_layer2_topology(reducer.get_single_edges_from_ports())
@with_exception_logging
def do_vlan_detection(vlans):
    """Detect and update VLAN subtopologies, optionally limited to ``vlans``."""
    analyzer = VlanGraphAnalyzer()
    if vlans:
        analyzer.analyze_vlans_by_id(vlans)
    else:
        analyzer.analyze_all()
    updater = VlanTopologyUpdater(analyzer.add_access_port_vlans())
    updater()
@with_exception_logging
def delete_unused_vlans():
    """Deletes vlans unassociated with prefixes or switch ports"""
    orphans = Vlan.objects.filter(prefix__isnull=True, swportvlan__isnull=True)
    if orphans:
        _logger.info("deleting unused vlans: %r", orphans)
        orphans.delete()
@with_exception_logging
def delete_unused_prefixes():
    """
    Deletes prefixes that are unassociated with any router port and were not
    manually entered into NAV.
    """
    manually_entered = Q(vlan__net_type__in=('scope', 'reserved', 'static'))
    orphans = Prefix.objects.filter(
        gwportprefix__isnull=True).exclude(manually_entered)
    if orphans:
        _logger.info("deleting unused prefixes: %s",
                     ", ".join(prefix.net_address for prefix in orphans))
        # Use raw SQL to avoid Django's emulated cascading deletes
        cursor = django.db.connection.cursor()
        cursor.execute('DELETE FROM prefix WHERE prefixid IN %s',
                       (tuple([p.id for p in orphans]), ))
def verify_singleton():
    """Verifies that we are the single running navtopology process.

    If a navtopology process is already running, we exit this process.
    """
    try:
        daemon.justme(PID_FILE)
    except daemon.AlreadyRunningError as error:
        sys.stderr.write("navtopology is already running (%d)\n" % error.pid)
        sys.exit(1)
    daemon.writepidfile(PID_FILE)
    # Make sure the pid file is cleaned up on normal interpreter exit.
    atexit.register(daemon.daemonexit, PID_FILE)
# Run topology detection when invoked as a script.
if __name__ == '__main__':
    main()
| gpl-3.0 |
imarqs/ardupilot | Tools/LogAnalyzer/tests/TestEvents.py | 273 | 1798 | from LogAnalyzer import Test,TestResult
import DataflashLog
class TestEvents(Test):
    '''test for erroneous events and failsafes'''

    # TODO: need to check for vehicle-specific codes
    # Maps an ERR Subsys value to (accepted ECodes, label).  An ECode entry of
    # None means every ECode for that subsystem counts as an error.
    _ERR_LABELS = {
        2: ((1,), "PPM"),
        3: ((1, 2), "COMPASS"),
        5: ((1,), "FS_THR"),
        6: ((1,), "FS_BATT"),
        7: ((1,), "GPS"),
        8: ((1,), "GCS"),
        9: ((1, 2), "FENCE"),
        10: (None, "FLT_MODE"),
        11: ((2,), "GPS_GLITCH"),
        12: ((1,), "CRASH"),
    }

    def __init__(self):
        Test.__init__(self)
        self.name = "Event/Failsafe"

    def run(self, logdata, verbose):
        self.result = TestResult()
        self.result.status = TestResult.StatusType.GOOD
        errors = set()
        if "ERR" in logdata.channels:
            subsysData = logdata.channels["ERR"]["Subsys"].listData
            ecodeData = logdata.channels["ERR"]["ECode"].listData
            assert(len(subsysData) == len(ecodeData))
            for subsysEntry, ecodeEntry in zip(subsysData, ecodeData):
                subSys = subsysEntry[1]
                eCode = ecodeEntry[1]
                entry = self._ERR_LABELS.get(subSys)
                if entry is None:
                    continue
                codes, label = entry
                if codes is None or eCode in codes:
                    errors.add(label)
        if errors:
            # A lone fence breach is only a warning; anything else fails.
            if errors == set(["FENCE"]):
                self.result.status = TestResult.StatusType.WARN
            else:
                self.result.status = TestResult.StatusType.FAIL
            prefix = "ERR found: " if len(errors) == 1 else "ERRs found: "
            self.result.statusMessage = prefix + "".join(err + " " for err in errors)
| gpl-3.0 |
Praetonus/Arc | src/arc/huffman.py | 1 | 5696 | #!/usr/bin/python3.4
#-*- coding: utf-8 -*-
########################################################################
# Copyright 2014 Benoît Vey #
# #
# This file is part of Arc. #
# #
# Arc is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# Arc is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with Arc. If not, see <http://www.gnu.org/licenses/>. #
########################################################################
import ctypes
def compress(pathI, pathO):
    """Huffman-compress the file at ``pathI`` into ``pathO``."""
    with open(pathI, "rb") as inputFile:
        freqs = frequencies(inputFile)
        codes = makeCodes(makeTree(freqs))
        # Rewind: the file is read once for counting and once for encoding.
        inputFile.seek(0)
        cmpStr = compressedString(inputFile, codes)
    with open(pathO, "wb") as outputFile:
        cmpWrite(outputFile, codes, cmpStr)
def decompress(pathI, pathO):
    """Decompress a file previously produced by ``compress``."""
    with open(pathI, "rb") as inputFile:
        # The code table is stored at the head of the file, payload after it.
        freqMap = makeFreqMap(inputFile)
        cmpStr = makeString(inputFile)
    with open(pathO, "wb") as outputFile:
        decmpWrite(outputFile, freqMap, cmpStr)
class Node:
    """A Huffman tree node; leaves carry a byte, inner nodes only a weight."""

    def __init__(self, char, weight):
        self.char = char
        self.weight = weight
        # Children get attached by makeTree when two nodes are merged.
        self.leftLeaf = self.rightLeaf = None
        # A fresh node is a leaf; makeTree clears these flags on inner nodes
        # so the makeCodes traversal knows where it may still descend.
        self.isEnd = self.pastLeft = self.pastRight = True
def frequencies(inputFile):
    """Count byte frequencies in ``inputFile``, reading one byte at a time."""
    freqs = {}
    while True:
        byte = inputFile.read(1)
        if not byte:
            # read() returns b"" at end of file.
            break
        freqs[byte] = freqs.get(byte, 0) + 1
    return freqs
def makeTree(freqs):
    """Build a Huffman tree from a ``{byte: count}`` map.

    Returns a ctypes pointer wrapping the root Node; the pointer/py_object
    indirection mirrors how makeCodes dereferences nodes with ``[0]``.
    """
    nodes = []
    for char, weight in freqs.items():
        nodes.append(ctypes.pointer(ctypes.py_object(Node(char, weight))))
    # Bug fix: the merge loop below always takes the two *first* nodes and
    # relies on the work list being ordered by weight, but dict iteration
    # order gave no such guarantee.  Sort once so the initial picks really
    # are the two lightest nodes, as Huffman coding requires.
    nodes.sort(key=lambda node: node[0].weight)
    while len(nodes) > 1:
        node1 = nodes.pop(0)
        node2 = nodes.pop(0)
        newNode = ctypes.pointer(ctypes.py_object(Node(b"", node1[0].weight + node2[0].weight)))
        newNode[0].leftLeaf = node1
        newNode[0].rightLeaf = node2
        # Inner nodes are traversable, not leaves.
        newNode[0].isEnd = False
        newNode[0].pastLeft = False
        newNode[0].pastRight = False
        # Re-insert the merged node at its sorted position by weight.
        i = 0
        while i < len(nodes) and nodes[i][0].weight < newNode[0].weight:
            i += 1
        nodes.insert(i, newNode)
    return nodes[0]
def makeCodes(root):
    """Walk the tree built by makeTree and derive the bit code for each leaf.

    Performs an iterative, *destructive* depth-first traversal using the
    pastLeft/pastRight flags on every node: each pass from the root follows
    the leftmost branch that is not yet exhausted, and a branch is marked
    'past' once the subtree below it has been fully visited.  Returns a
    {byte: bit-string} mapping.
    """
    codes = {}
    while 1:
        currentNode = root
        code = str()
        blocked = False
        # Descend until we reach a leaf, or an inner node with no branch left.
        while not currentNode[0].isEnd and not blocked:
            if not currentNode[0].pastLeft:
                # Mark the left branch done once its subtree is exhausted.
                if currentNode[0].leftLeaf[0].pastLeft and currentNode[0].leftLeaf[0].pastRight:
                    currentNode[0].pastLeft = True
                currentNode = currentNode[0].leftLeaf
                code += "0"
            elif not currentNode[0].pastRight:
                # Same for the right branch; right edges contribute a "1" bit.
                if currentNode[0].rightLeaf[0].pastLeft and currentNode[0].rightLeaf[0].pastRight:
                    currentNode[0].pastRight = True
                currentNode = currentNode[0].rightLeaf
                code += "1"
            else:
                blocked = True
        if currentNode[0].isEnd:
            # Reached a leaf: record its code and mark it fully visited.
            codes[currentNode[0].char] = code
            currentNode[0].pastLeft = True
            currentNode[0].pastRight = True
        if blocked and currentNode == root:
            # Blocked at the root: the whole tree has been exhausted.
            break
    return codes
def compressedString(inputFile, codes):
    """Encode ``inputFile`` byte by byte using ``codes``; pad to a byte boundary."""
    pieces = []
    while True:
        char = inputFile.read(1)
        if not char:
            break
        if char in codes:
            pieces.append(codes[char])
    encoded = "".join(pieces)
    # Pad with zero bits so cmpWrite can emit whole bytes.
    if len(encoded) % 8:
        encoded += "0" * (8 - len(encoded) % 8)
    return encoded
def cmpWrite(outputFile, codes, cmpStr):
    """Write the code table and the packed bit string to ``outputFile``.

    Each table entry is the literal byte followed by its code left-padded to
    eight bits with the *complement* of its first bit; makeFreqMap strips
    that leading run to recover the variable-length code.
    """
    outputFile.write(len(codes).to_bytes(1, "little"))
    for char, code in codes.items():
        outputFile.write(char)
        padBit = "1" if code[0] == "0" else "0"
        padded = padBit * (8 - len(code)) + code
        # Only the first eight bits are stored -- a pre-existing limitation
        # of the file format for codes longer than one byte.
        outputFile.write(int(padded[:8], 2).to_bytes(1, "little"))
    # Pack the payload eight bits at a time; any trailing partial byte is
    # dropped (compressedString always pads to a byte boundary).
    for i in range(0, len(cmpStr) - 7, 8):
        outputFile.write(int(cmpStr[i:i + 8], 2).to_bytes(1, "little"))
def makeFreqMap(inputFile):
    """Read the code table written by cmpWrite; returns {bit string: byte value}."""
    freqMap = {}
    size = int.from_bytes(inputFile.read(1), "little")
    for _ in range(size):
        char = int.from_bytes(inputFile.read(1), "little")
        packed = int.from_bytes(inputFile.read(1), "little")
        bits = list(format(packed, "08b"))
        # cmpWrite pads every code with the complement of its first bit, so
        # the padding is exactly the leading run of identical bits.
        padBit = bits[0]
        while bits[0] == padBit:
            bits.pop(0)
        freqMap["".join(bits)] = char
    return freqMap
def makeString(inputFile):
    """Read the packed payload and expand it into a list of "0"/"1" characters."""
    bits = []
    while True:
        byteStr = inputFile.read(1)
        if not byteStr:
            break
        # format() yields the eight-character binary expansion of the byte.
        bits.extend(format(byteStr[0], "08b"))
    return bits
def decmpWrite(outputFile, freqMap, cmpStr):
    """Decode the bit list ``cmpStr`` via ``freqMap`` and write the bytes out.

    Consumes ``cmpStr`` destructively (pop from the front), matching the
    original behaviour.
    """
    pending = ""
    while cmpStr:
        pending += cmpStr.pop(0)
        if pending in freqMap:
            outputFile.write(freqMap[pending].to_bytes(1, "little"))
            pending = ""
| gpl-3.0 |
edisongustavo/asv | test/test_results.py | 2 | 1849 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
from os.path import join
import six
from asv import results, util
import pytest
def test_results(tmpdir):
    """Round-trip a batch of Results objects through save() and load()."""
    tmpdir = six.text_type(tmpdir)
    resultsdir = join(tmpdir, "results")
    for i in six.moves.xrange(10):
        r = results.Results(
            {'machine': 'foo',
             'arch': 'x86_64'},
            {},
            hex(i),
            i * 1000000,
            '2.7')
        timings = {
            'suite1.benchmark1': float(i * 0.001),
            'suite1.benchmark2': float(i * i * 0.001),
            'suite2.benchmark1': float((i + 1) ** -1),
        }
        for key, val in timings.items():
            r.add_time(key, val)
        r.save(resultsdir)
        loaded = results.Results.load(join(resultsdir, r._filename))
        assert loaded._results == r._results
        assert loaded.date == r.date
        assert loaded.commit_hash == r.commit_hash
def test_get_result_hash_from_prefix(tmpdir):
    """Prefix lookup of result files: unique, missing and ambiguous cases."""
    results_dir = tmpdir.mkdir('results')
    machine_dir = results_dir.mkdir('machine')
    for commit in ('e5b6cdbc', 'e5bfoo12'):
        path = join(str(machine_dir), '{0}-py2.7-Cython-numpy1.8.json'.format(commit))
        open(path, 'a').close()
    # check unique, valid case
    assert results.get_result_hash_from_prefix(
        str(results_dir), 'machine', 'e5b6') == 'e5b6cdbc'
    # check invalid hash case
    assert results.get_result_hash_from_prefix(
        str(results_dir), 'machine', 'foobar') is None
    # check non-unique case
    with pytest.raises(util.UserError) as excinfo:
        results.get_result_hash_from_prefix(str(results_dir), 'machine', 'e')
    assert 'one of multiple commits' in str(excinfo.value)
| bsd-3-clause |
avaris/aBibliophile | searchmodel.py | 1 | 1847 | #!/usr/bin/env python
# -.- coding: utf-8 -.-
# Author : Deniz Turgut
# Created : 05.11.2011
from PyQt4 import QtGui, QtCore
class SearchModel(QtGui.QSortFilterProxyModel):
    """Sort/filter proxy over SearchBaseModel, kept case-insensitively sorted."""

    def __init__(self, parent=None):
        super(SearchModel, self).__init__(parent)
        self.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive)
        self.setSortLocaleAware(True)
        # Re-sort automatically whenever the source model changes.
        self.setDynamicSortFilter(True)
        self.setSourceModel(SearchBaseModel())
        self.sort(0)

    def clear(self):
        # Delegate to the underlying model; the proxy holds no data itself.
        self.sourceModel().clear()

    def addDataFromList(self, bookList):
        # bookList: iterable of book dicts consumed by SearchBaseModel.
        self.sourceModel().addDataFromList(bookList)
class SearchBaseModel(QtCore.QAbstractItemModel):
    """Flat, single-column model holding book dicts from a search result.

    Each entry is a dict with at least 'title', 'writers' (a list of
    strings), 'publisher' and 'url' keys.
    """

    def __init__(self, parent=None):
        super(SearchBaseModel, self).__init__(parent)
        self._data = []

    def rowCount(self, parent):
        return len(self._data)

    def columnCount(self, parent):
        # Only the title column is exposed.
        return 1

    def index(self, row, column, parent):
        return self.createIndex(row, column, QtCore.QModelIndex())

    def parent(self, index):
        # Flat list: no item has a parent.
        return QtCore.QModelIndex()

    def data(self, index, role):
        if role == QtCore.Qt.DisplayRole:
            return self._data[index.row()]["title"]
        if role == QtCore.Qt.ToolTipRole:
            book = self._data[index.row()]
            return self.tr("Writer: %s\nPublisher: %s") % (
                ", ".join(book["writers"]), book["publisher"])
        if role == QtCore.Qt.UserRole:
            return self._data[index.row()]["url"]
        # Other roles fall through and return None, as Qt expects.

    def addData(self, data):
        self._data.append(data)

    def addDataFromList(self, dataList):
        # Wrap bulk insertion in layout-change signals so attached views
        # and proxies refresh themselves.
        self.layoutAboutToBeChanged.emit()
        for data in dataList:
            self.addData(data)
        self.layoutChanged.emit()

    def clear(self):
        self._data = []
| gpl-3.0 |
larsks/cobbler-larsks | apitests/system_test.py | 12 | 2041 | """
Copyright 2009, Red Hat, Inc
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from base import *
import time
class SystemTests(CobblerTest):
    """API tests around creating cobbler system records."""

    def setUp(self):
        # Every system needs a distro and a profile to attach to.
        CobblerTest.setUp(self)
        (self.distro_id, self.distro_name) = self.create_distro()
        (self.profile_id, self.profile_name) = self.create_profile(self.distro_name)

    def test_create_system(self):
        """ Test creation of a cobbler system. """
        (system_id, system_name) = self.create_system(self.profile_name)
        systems = self.api.find_system({'name': system_name})
        self.assertTrue(len(systems) > 0)

    # Old tests laying around indicate this should pass, but it no longer seems too?
    #def test_nopxe(self):
    #    """ Test network boot loop prevention. """
    #    (system_id, system_name) = self.create_system(self.profile_name)
    #    self.api.modify_system(system_id, 'netboot_enabled', True, self.token)
    #    self.api.save_system(system_id, self.token)
    #    #systems = self.api.find_system({'name': system_name})
    #    url = "http://%s/cblr/svc/op/nopxe/system/%s" % \
    #            (cfg['cobbler_server'], system_name)
    #    data = urlgrabber.urlread(url)
    #    time.sleep(2)
    #    results = self.api.get_blended_data("", system_name)
    #    print(results['netboot_enabled'])
    #    self.assertFalse(results['netboot_enabled'])
| gpl-2.0 |
piquadrat/django | tests/view_tests/generic_urls.py | 72 | 1313 | from django.conf.urls import url
from django.contrib.auth import views as auth_views
from django.views.generic import RedirectView
from . import views
from .models import Article, DateArticle
# NOTE(review): the option dicts below are not referenced anywhere in this
# file; presumably they are imported by the generic-view regression tests --
# confirm against the test suite before removing any of them.
date_based_info_dict = {
    'queryset': Article.objects.all(),
    'date_field': 'date_created',
    'month_format': '%m',
}
object_list_dict = {
    'queryset': Article.objects.all(),
    'paginate_by': 2,
}
object_list_no_paginate_by = {
    'queryset': Article.objects.all(),
}
numeric_days_info_dict = dict(date_based_info_dict, day_format='%d')
date_based_datefield_info_dict = dict(date_based_info_dict, queryset=DateArticle.objects.all())
urlpatterns = [
    url(r'^accounts/login/$', auth_views.LoginView.as_view(template_name='login.html')),
    url(r'^accounts/logout/$', auth_views.LogoutView.as_view()),
    # Special URLs for particular regression cases.
    url('^中文/target/$', views.index_page),
]
# redirects, both temporary and permanent, with non-ASCII targets
urlpatterns += [
    url('^nonascii_redirect/$', RedirectView.as_view(
        url='/中文/target/', permanent=False)),
    url('^permanent_nonascii_redirect/$', RedirectView.as_view(
        url='/中文/target/', permanent=True)),
]
# json response
urlpatterns += [
    url(r'^json/response/$', views.json_response_view),
]
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.