| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
hzlf/openbroadcast | website/apps/alibrary/migrations/0100_auto__del_field_distributor_email_main__add_field_distributor_email.py | 1 | 53909 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Distributor.email_main'
db.delete_column('alibrary_distributor', 'email_main')
# Adding field 'Distributor.email'
db.add_column('alibrary_distributor', 'email',
self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Adding field 'Distributor.email_main'
db.add_column('alibrary_distributor', 'email_main',
self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True),
keep_default=False)
# Deleting field 'Distributor.email'
db.delete_column('alibrary_distributor', 'email')
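# South "frozen ORM" snapshot: a serialized copy of every model this migration
# may touch, so forwards()/backwards() operate on the historical schema via
# the `orm` argument rather than on the current live models.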
models = {
'actstream.action': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'Action'},
'action_object_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'action_object'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'action_object_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'actor_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actor'", 'to': "orm['contenttypes.ContentType']"}),
'actor_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'data': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'target'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'target_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'verb': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'alibrary.apilookup': {
'Meta': {'ordering': "('created',)", 'object_name': 'APILookup'},
'api_data': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'processed': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '2'}),
'provider': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50'}),
'ressource_id': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uri': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'alibrary.artist': {
'Meta': {'ordering': "('name',)", 'object_name': 'Artist'},
'aliases': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'aliases_rel_+'", 'null': 'True', 'to': "orm['alibrary.Artist']"}),
'biography': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artists_creator'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'd_tags': ('tagging.fields.TagField', [], {'null': 'True'}),
'date_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'disable_editing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'disable_link': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'disambiguation': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'enable_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artist_folder'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['filer.Folder']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'legacy_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'listed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'main_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artist_main_image'", 'null': 'True', 'to': "orm['filer.Image']"}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['alibrary.Artist']", 'through': "orm['alibrary.ArtistMembership']", 'symmetrical': 'False'}),
'migrated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artists_owner'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'placeholder_1': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'professions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['alibrary.Profession']", 'through': "orm['alibrary.ArtistProfessions']", 'symmetrical': 'False'}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artists_publisher'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.artistmembership': {
'Meta': {'object_name': 'ArtistMembership'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'artist_child'", 'to': "orm['alibrary.Artist']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'artist_parent'", 'to': "orm['alibrary.Artist']"}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artist_membership_profession'", 'null': 'True', 'to': "orm['alibrary.Profession']"})
},
'alibrary.artistplugin': {
'Meta': {'object_name': 'ArtistPlugin', 'db_table': "'cmsplugin_artistplugin'", '_ormbases': ['cms.CMSPlugin']},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Artist']"}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'})
},
'alibrary.artistprofessions': {
'Meta': {'object_name': 'ArtistProfessions'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Artist']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Profession']"})
},
'alibrary.daypart': {
'Meta': {'ordering': "('day', 'time_start')", 'object_name': 'Daypart'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'day': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '1', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time_end': ('django.db.models.fields.TimeField', [], {}),
'time_start': ('django.db.models.fields.TimeField', [], {})
},
'alibrary.distributor': {
'Meta': {'ordering': "('name',)", 'object_name': 'Distributor'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'distributors_creator'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'd_tags': ('tagging.fields.TagField', [], {'null': 'True'}),
'description': ('lib.fields.extra.MarkdownTextField', [], {'null': 'True', 'blank': 'True'}),
'description_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'legacy_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'migrated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'distributors_owner'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'label_children'", 'null': 'True', 'to': "orm['alibrary.Distributor']"}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'distributors_publisher'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '12'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
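# Note: the lft/rght/tree_id/level columns above are django-mptt's nested-set
# bookkeeping fields; Distributor (like Label and License below) forms a tree
# via its TreeForeignKey parent.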
'alibrary.format': {
'Meta': {'ordering': "('format', 'version')", 'object_name': 'Format'},
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'default_price': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'format': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'default': "'base'", 'max_length': '10'})
},
'alibrary.label': {
'Meta': {'ordering': "('name',)", 'object_name': 'Label'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'labels_creator'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'd_tags': ('tagging.fields.TagField', [], {'null': 'True'}),
'description': ('lib.fields.extra.MarkdownTextField', [], {'null': 'True', 'blank': 'True'}),
'description_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disable_editing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'disable_link': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_main': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'label_folder'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'labelcode': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'legacy_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'listed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'migrated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'labels_owner'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'label_children'", 'null': 'True', 'to': "orm['alibrary.Label']"}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'labels_publisher'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '12'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.license': {
'Meta': {'ordering': "('name',)", 'object_name': 'License'},
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}),
'legacy_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'migrated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'license_children'", 'null': 'True', 'to': "orm['alibrary.License']"}),
'restricted': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'405431c1-a180-4b22-8383-64392cf37ae9'", 'max_length': '36'})
},
'alibrary.licensetranslation': {
'Meta': {'ordering': "('language_code',)", 'unique_together': "(('language_code', 'master'),)", 'object_name': 'LicenseTranslation', 'db_table': "'alibrary_license_translation'"},
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '15', 'blank': 'True'}),
'license_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'to': "orm['alibrary.License']"}),
'name_translated': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'alibrary.media': {
'Meta': {'ordering': "('tracknumber',)", 'object_name': 'Media'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_artist'", 'null': 'True', 'to': "orm['alibrary.Artist']"}),
'base_bitrate': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'base_duration': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'base_filesize': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'base_format': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'base_samplerate': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'conversion_status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '2'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_creator'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'd_tags': ('tagging.fields.TagField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'echoprint_status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '2'}),
'extra_artists': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['alibrary.Artist']", 'null': 'True', 'through': "orm['alibrary.MediaExtraartists']", 'blank': 'True'}),
'folder': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isrc': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'legacy_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_license'", 'null': 'True', 'to': "orm['alibrary.License']"}),
'lock': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '1'}),
'master': ('django.db.models.fields.files.FileField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'master_sha1': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'mediatype': ('django.db.models.fields.CharField', [], {'default': "'track'", 'max_length': '12'}),
'migrated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_owner'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'processed': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '2'}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_publisher'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_release'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['alibrary.Release']"}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'True'}),
'tracknumber': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.mediaextraartists': {
'Meta': {'ordering': "('profession__name', 'artist__name')", 'object_name': 'MediaExtraartists'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'extraartist_artist'", 'to': "orm['alibrary.Artist']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'media': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'extraartist_media'", 'to': "orm['alibrary.Media']"}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_extraartist_profession'", 'null': 'True', 'to': "orm['alibrary.Profession']"})
},
'alibrary.mediaformat': {
'Meta': {'ordering': "('name',)", 'object_name': 'Mediaformat'},
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_listing': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'alibrary.mediaplugin': {
'Meta': {'object_name': 'MediaPlugin', 'db_table': "'cmsplugin_mediaplugin'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'headline': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'media': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Media']"})
},
'alibrary.playlist': {
'Meta': {'ordering': "('-updated',)", 'object_name': 'Playlist'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'd_tags': ('tagging.fields.TagField', [], {'null': 'True'}),
'dayparts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'daypart_plalists'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['alibrary.Daypart']"}),
'description': ('lib.fields.extra.MarkdownTextField', [], {'null': 'True', 'blank': 'True'}),
'description_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duration': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '12', 'null': 'True'}),
'edit_mode': ('django.db.models.fields.PositiveIntegerField', [], {'default': '2'}),
'enable_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['alibrary.PlaylistItem']", 'null': 'True', 'through': "orm['alibrary.PlaylistItemPlaylist']", 'blank': 'True'}),
'main_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'seasons': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'season_plalists'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['alibrary.Season']"}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'target_duration': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'other'", 'max_length': '12', 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'weather': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'weather_plalists'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['alibrary.Weather']"})
},
'alibrary.playlistitem': {
'Meta': {'object_name': 'PlaylistItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.playlistitemplaylist': {
'Meta': {'object_name': 'PlaylistItemPlaylist'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'cue_in': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'cue_out': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_cross': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_in': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_out': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.PlaylistItem']"}),
'playlist': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Playlist']"}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.playlistmedia': {
'Meta': {'object_name': 'PlaylistMedia'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'cue_in': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'cue_out': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_cross': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_in': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_out': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'media': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Media']"}),
'playlist': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Playlist']"}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.profession': {
'Meta': {'ordering': "('name',)", 'object_name': 'Profession'},
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_listing': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
'alibrary.relation': {
'Meta': {'ordering': "('url',)", 'object_name': 'Relation'},
'action': ('django.db.models.fields.CharField', [], {'default': "'information'", 'max_length': '50'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'object_id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'service': ('django.db.models.fields.CharField', [], {'default': "'generic'", 'max_length': '50'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '512'})
},
'alibrary.release': {
'Meta': {'ordering': "('-created',)", 'object_name': 'Release'},
'asin': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'catalognumber': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'cover_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_cover_image'", 'null': 'True', 'to': "orm['filer.Image']"}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'releases_creator'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'd_tags': ('tagging.fields.TagField', [], {'null': 'True'}),
'description': ('lib.fields.extra.MarkdownTextField', [], {'null': 'True', 'blank': 'True'}),
'description_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'enable_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'extra_artists': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['alibrary.Artist']", 'null': 'True', 'through': "orm['alibrary.ReleaseExtraartists']", 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_folder'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['filer.Folder']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_label'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['alibrary.Label']"}),
'legacy_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_license'", 'null': 'True', 'to': "orm['alibrary.License']"}),
'main_format': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Mediaformat']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'main_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_main_image'", 'null': 'True', 'to': "orm['filer.Image']"}),
'media': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'releases'", 'to': "orm['alibrary.Media']", 'through': "orm['alibrary.ReleaseMedia']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'migrated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'releases_owner'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'placeholder_1': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'pressings': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'releases_publisher'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'release_country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'releasedate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'releasedate_approx': ('django_date_extensions.fields.ApproximateDateField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'releasestatus': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'}),
'releasetype': ('django.db.models.fields.CharField', [], {'default': "'other'", 'max_length': '12'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'True'}),
'totaltracks': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.releaseextraartists': {
'Meta': {'object_name': 'ReleaseExtraartists'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'release_extraartist_artist'", 'to': "orm['alibrary.Artist']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_extraartist_profession'", 'null': 'True', 'to': "orm['alibrary.Profession']"}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'release_extraartist_release'", 'to': "orm['alibrary.Release']"})
},
'alibrary.releasemedia': {
'Meta': {'object_name': 'ReleaseMedia'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'media': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Media']"}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Release']"})
},
'alibrary.releaseplugin': {
'Meta': {'object_name': 'ReleasePlugin', 'db_table': "'cmsplugin_releaseplugin'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Release']"})
},
'alibrary.releaserelations': {
'Meta': {'object_name': 'ReleaseRelations'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'release_relation_relation'", 'to': "orm['alibrary.Relation']"}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'release_relation_release'", 'to': "orm['alibrary.Release']"})
},
'alibrary.season': {
'Meta': {'ordering': "('-name',)", 'object_name': 'Season'},
'date_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name_de': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'alibrary.weather': {
'Meta': {'ordering': "('-name',)", 'object_name': 'Weather'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name_de': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'arating.vote': {
'Meta': {'unique_together': "(('user', 'content_type', 'object_id'),)", 'object_name': 'Vote'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['alibrary'] | gpl-3.0 |
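For readers on Django 1.7+, where South's functionality was merged into core, the same schema change can be expressed with the built-in migration framework. The sketch below is illustrative only and not part of either repository; the app label `alibrary` is taken from the file above, while the dependency name is a hypothetical placeholder.

```python
# Minimal sketch of the equivalent built-in Django migration (assumes Django >= 1.7).
from django.db import migrations, models


class Migration(migrations.Migration):

    # Hypothetical predecessor; the real chain would name migration 0099.
    dependencies = [('alibrary', '0099_auto')]

    operations = [
        # Drop the old column...
        migrations.RemoveField(model_name='distributor', name='email_main'),
        # ...and add the renamed one with the same field definition.
        migrations.AddField(
            model_name='distributor',
            name='email',
            field=models.EmailField(max_length=75, null=True, blank=True),
        ),
    ]
```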
allmende/synnefo | snf-cyclades-app/synnefo/logic/tests/callbacks.py | 3 | 32469 | # vim: set fileencoding=utf-8 :
# Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Provides automated tests for the logic module
from random import randint
from django.test import TestCase
from synnefo.db.models import (VirtualMachine, IPAddress, BackendNetwork,
Network, BridgePoolTable, MacPrefixPoolTable)
from synnefo.db import models_factory as mfactory
from synnefo.lib.utils import split_time
from datetime import datetime
from mock import patch
from synnefo.api.util import allocate_resource
from synnefo.logic.callbacks import (update_db, update_network,
update_build_progress)
from snf_django.utils.testing import mocked_quotaholder
from synnefo.logic.rapi import GanetiApiError
now = datetime.now
from time import time
import json
# Test Callbacks
@patch('synnefo.lib.amqp.AMQPClient')
class UpdateDBTest(TestCase):
def create_msg(self, **kwargs):
"""Create snf-ganeti-eventd message"""
msg = {'event_time': split_time(time())}
msg['type'] = 'ganeti-op-status'
msg['status'] = 'success'
msg['jobId'] = 1
msg['logmsg'] = 'Dummy Log'
for key, val in kwargs.items():
msg[key] = val
message = {'body': json.dumps(msg)}
return message
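# Illustrative note: create_msg() mimics the AMQP payload published by
# snf-ganeti-eventd -- a dict whose 'body' holds the JSON-encoded event, e.g.
# create_msg(operation='OP_INSTANCE_STARTUP', instance='foo') produces
# {'body': '{"event_time": [...], "type": "ganeti-op-status",
# "status": "success", "jobId": 1, "logmsg": "Dummy Log",
# "operation": "OP_INSTANCE_STARTUP", "instance": "foo"}'}.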
def test_missing_attribute(self, client):
update_db(client, json.dumps({'body': {}}))
self.assertTrue(client.basic_reject.called)
def test_unhandled_exception(self, client):
update_db(client, None)
self.assertEqual(1, client.basic_reject.call_count)
def test_not_json(self, client):
update_db(client, {'body': ''})
self.assertEqual(1, client.basic_nack.call_count)
def test_malformed_json(self, client):
update_db(client, {'body': '{}'})
self.assertEqual(1, client.basic_nack.call_count)
def test_missing_instance(self, client):
msg = self.create_msg(operation='OP_INSTANCE_STARTUP',
instance='foo')
update_db(client, msg)
self.assertTrue(client.basic_ack.called)
def test_wrong_type(self, client):
msg = self.create_msg(type="WRONG_TYPE")
update_db(client, msg)
self.assertTrue(client.basic_nack.called)
def test_old_msg(self, client):
from time import sleep
from datetime import datetime
old_time = time()
sleep(0.01)
new_time = datetime.fromtimestamp(time())
vm = mfactory.VirtualMachineFactory(backendtime=new_time)
vm.operstate = 'STOPPED'
vm.save()
msg = self.create_msg(operation='OP_INSTANCE_STARTUP',
event_time=split_time(old_time),
instance=vm.backend_vm_id)
update_db(client, msg)
self.assertTrue(client.basic_ack.called)
db_vm = VirtualMachine.objects.get(id=vm.id)
self.assertEquals(db_vm.operstate, "STOPPED")
self.assertEquals(db_vm.backendtime, new_time)
def test_start(self, client):
vm = mfactory.VirtualMachineFactory()
msg = self.create_msg(operation='OP_INSTANCE_STARTUP',
instance=vm.backend_vm_id)
with mocked_quotaholder():
update_db(client, msg)
self.assertTrue(client.basic_ack.called)
db_vm = VirtualMachine.objects.get(id=vm.id)
self.assertEqual(db_vm.operstate, 'STARTED')
def test_stop(self, client):
vm = mfactory.VirtualMachineFactory()
msg = self.create_msg(operation='OP_INSTANCE_SHUTDOWN',
instance=vm.backend_vm_id)
with mocked_quotaholder():
update_db(client, msg)
self.assertTrue(client.basic_ack.called)
db_vm = VirtualMachine.objects.get(id=vm.id)
self.assertEqual(db_vm.operstate, 'STOPPED')
def test_reboot(self, client):
vm = mfactory.VirtualMachineFactory()
msg = self.create_msg(operation='OP_INSTANCE_REBOOT',
instance=vm.backend_vm_id)
update_db(client, msg)
self.assertTrue(client.basic_ack.called)
db_vm = VirtualMachine.objects.get(id=vm.id)
self.assertEqual(db_vm.operstate, 'STARTED')
def test_remove(self, client):
vm = mfactory.VirtualMachineFactory(flavor__cpu=1, flavor__ram=128)
mfactory.VolumeFactory(userid=vm.userid, machine=vm, size=1)
mfactory.VolumeFactory(userid=vm.userid, machine=vm, size=3)
# Also create a NIC
ip = mfactory.IPv4AddressFactory(nic__machine=vm)
nic = ip.nic
nic.network.get_ip_pools()[0].reserve(nic.ipv4_address)
msg = self.create_msg(operation='OP_INSTANCE_REMOVE',
instance=vm.backend_vm_id)
with mocked_quotaholder() as m:
update_db(client, msg)
self.assertTrue(client.basic_ack.called)
db_vm = VirtualMachine.objects.get(id=vm.id)
self.assertEqual(db_vm.operstate, 'DESTROYED')
self.assertTrue(db_vm.deleted)
# Check that nics are deleted
self.assertFalse(db_vm.nics.all())
self.assertTrue(nic.network.get_ip_pools()[0].is_available(ip.address))
# Check that volumes are deleted
self.assertFalse(db_vm.volumes.filter(deleted=False))
# Check quotas: deltas are reported in base units
# (bytes for disk and ram, a plain count for cpu)
name, args, kwargs = m.mock_calls[0]
for (userid, res), value in args[1].items():
if res == 'cyclades.disk':
self.assertEqual(value, -4 << 30)  # the two deleted volumes: (1 + 3) GiB in bytes
elif res == 'cyclades.cpu':
self.assertEqual(value, -1)
elif res == 'cyclades.ram':
self.assertEqual(value, -128 << 20)  # the flavor's 128 MiB in bytes
vm2 = mfactory.VirtualMachineFactory()
fp1 = mfactory.IPv4AddressFactory(nic__machine=vm2, floating_ip=True,
network__floating_ip_pool=True)
network = fp1.network
nic1 = mfactory.NetworkInterfaceFactory(machine=vm2)
fp1.nic = nic1
fp1.save()
pool = network.get_ip_pools()[0]
pool.reserve(fp1.address)
pool.save()
msg = self.create_msg(operation='OP_INSTANCE_REMOVE',
instance=vm2.backend_vm_id)
with mocked_quotaholder():
update_db(client, msg)
self.assertEqual(2, client.basic_ack.call_count)
db_vm = VirtualMachine.objects.get(id=vm.id)
self.assertEqual(db_vm.operstate, 'DESTROYED')
self.assertTrue(db_vm.deleted)
self.assertEqual(IPAddress.objects.get(id=fp1.id).nic, None)
pool = network.get_ip_pools()[0]
# Test that floating ips are not released
self.assertFalse(pool.is_available(fp1.address))
@patch("synnefo.logic.rapi_pool.GanetiRapiClient")
def test_remove_error(self, rapi, client):
vm = mfactory.VirtualMachineFactory()
# Also create a NIC
msg = self.create_msg(operation='OP_INSTANCE_REMOVE',
status="error",
instance=vm.backend_vm_id)
rapi().GetInstance.return_value = {}
update_db(client, msg)
db_vm = VirtualMachine.objects.get(id=vm.id)
self.assertFalse(db_vm.deleted)
rapi().GetInstance.side_effect = GanetiApiError(msg="msg",
code=503)
update_db(client, msg)
db_vm = VirtualMachine.objects.get(id=vm.id)
self.assertFalse(db_vm.deleted)
rapi().GetInstance.side_effect = GanetiApiError(msg="msg",
code=404)
with mocked_quotaholder():
update_db(client, msg)
db_vm = VirtualMachine.objects.get(id=vm.id)
self.assertTrue(db_vm.deleted)
def test_create(self, client):
vm = mfactory.VirtualMachineFactory()
msg = self.create_msg(operation='OP_INSTANCE_CREATE',
instance=vm.backend_vm_id)
update_db(client, msg)
self.assertTrue(client.basic_ack.called)
db_vm = VirtualMachine.objects.get(id=vm.id)
self.assertEqual(db_vm.operstate, 'STARTED')
def test_create_error(self, client):
"""Test that error create sets vm to ERROR state"""
vm = mfactory.VirtualMachineFactory()
msg = self.create_msg(operation='OP_INSTANCE_CREATE',
instance=vm.backend_vm_id,
status='error')
update_db(client, msg)
self.assertTrue(client.basic_ack.called)
db_vm = VirtualMachine.objects.get(id=vm.id)
self.assertEqual(db_vm.operstate, 'ERROR')
def test_remove_from_error(self, client):
"""Test that error removes delete error builds"""
vm = mfactory.VirtualMachineFactory(operstate='ERROR')
# Also create a NIC
mfactory.NetworkInterfaceFactory(machine=vm)
msg = self.create_msg(operation='OP_INSTANCE_REMOVE',
instance=vm.backend_vm_id)
with mocked_quotaholder():
update_db(client, msg)
self.assertTrue(client.basic_ack.called)
db_vm = VirtualMachine.objects.get(id=vm.id)
self.assertEqual(db_vm.operstate, 'DESTROYED')
self.assertTrue(db_vm.deleted)
# Check that nics are deleted
self.assertFalse(db_vm.nics.all())
def test_other_error(self, client):
"""Test that other error messages do no affect the VM"""
vm = mfactory.VirtualMachineFactory()
msg = self.create_msg(operation='OP_INSTANCE_STARTUP',
instance=vm.backend_vm_id,
status='error')
update_db(client, msg)
self.assertTrue(client.basic_ack.called)
db_vm = VirtualMachine.objects.get(id=vm.id)
self.assertEqual(db_vm.operstate, vm.operstate)
self.assertEqual(db_vm.backendtime, vm.backendtime)
def test_resize_msg(self, client):
vm = mfactory.VirtualMachineFactory()
# Test empty beparams
for status in ["success", "error"]:
msg = self.create_msg(operation='OP_INSTANCE_SET_PARAMS',
instance=vm.backend_vm_id,
job_fields={"beparams": {}},
status=status)
client.reset_mock()
with mocked_quotaholder():
update_db(client, msg)
self.assertTrue(client.basic_ack.called)
db_vm = VirtualMachine.objects.get(id=vm.id)
self.assertEqual(db_vm.operstate, vm.operstate)
# Test intermediate states
vm.operstate = "STOPPED"
vm.save()
for status in ["queued", "waiting", "running"]:
beparams = {"vcpus": 4, "minmem": 2048, "maxmem": 2048}
msg = self.create_msg(operation='OP_INSTANCE_SET_PARAMS',
instance=vm.backend_vm_id,
job_fields={"beparams": beparams},
status=status)
client.reset_mock()
update_db(client, msg)
self.assertTrue(client.basic_ack.called)
db_vm = VirtualMachine.objects.get(id=vm.id)
self.assertEqual(db_vm.operstate, "STOPPED")
# Test operstate after error
msg = self.create_msg(operation='OP_INSTANCE_SET_PARAMS',
instance=vm.backend_vm_id,
beparams={"vcpus": 4},
status="error")
client.reset_mock()
with mocked_quotaholder():
update_db(client, msg)
self.assertTrue(client.basic_ack.called)
db_vm = VirtualMachine.objects.get(id=vm.id)
self.assertEqual(db_vm.operstate, "STOPPED")
# Test success
f1 = mfactory.FlavorFactory(cpu=4, ram=1024,
volume_type__disk_template="drbd",
disk=1024)
vm.flavor = f1
vm.save()
f2 = mfactory.FlavorFactory(cpu=8, ram=2048,
volume_type__disk_template="drbd",
disk=1024)
beparams = {"vcpus": 8, "minmem": 2048, "maxmem": 2048}
msg = self.create_msg(operation='OP_INSTANCE_SET_PARAMS',
instance=vm.backend_vm_id,
job_fields={"beparams": beparams},
status="success")
client.reset_mock()
with mocked_quotaholder():
update_db(client, msg)
self.assertTrue(client.basic_ack.called)
db_vm = VirtualMachine.objects.get(id=vm.id)
self.assertEqual(db_vm.operstate, "STOPPED")
self.assertEqual(db_vm.flavor, f2)
beparams = {"vcpus": 100, "minmem": 2048, "maxmem": 2048}
msg = self.create_msg(operation='OP_INSTANCE_SET_PARAMS',
instance=vm.backend_vm_id,
job_fields={"beparams": beparams},
status="success")
client.reset_mock()
with mocked_quotaholder():
update_db(client, msg)
self.assertTrue(client.basic_reject.called)
@patch("synnefo.plankton.backend.get_pithos_backend")
def test_error_snapshot(self, pithos_backend, client):
vm = mfactory.VirtualMachineFactory()
disks = [
(0, {"snapshot_info": json.dumps({"snapshot_id":
"test_snapshot_id"})})
]
msg = self.create_msg(operation='OP_INSTANCE_SNAPSHOT',
instance=vm.backend_vm_id,
job_fields={'disks': disks},
status="running")
update_db(client, msg)
self.assertEqual(pithos_backend().update_object_status.mock_calls, [])
msg = self.create_msg(operation='OP_INSTANCE_SNAPSHOT',
instance=vm.backend_vm_id,
job_fields={'disks': disks},
event_time=split_time(time()),
status="error")
update_db(client, msg)
pithos_backend().update_object_status\
.assert_called_once_with("test_snapshot_id", state=-1)
pithos_backend.reset_mock()
msg = self.create_msg(operation='OP_INSTANCE_SNAPSHOT',
instance=vm.backend_vm_id,
job_fields={'disks': disks},
event_time=split_time(time()),
status="success")
update_db(client, msg)
pithos_backend().update_object_status\
.assert_called_once_with("test_snapshot_id", state=1)
@patch('synnefo.lib.amqp.AMQPClient')
class UpdateNetTest(TestCase):
def create_msg(self, **kwargs):
"""Create snf-ganeti-hook message"""
msg = {'event_time': split_time(time())}
msg['type'] = 'ganeti-op-status'
msg['operation'] = 'OP_INSTANCE_SET_PARAMS'
msg['status'] = 'success'
msg['jobId'] = 1
msg['logmsg'] = 'Dummy Log'
for key, val in kwargs.items():
msg[key] = val
message = {'body': json.dumps(msg)}
return message
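# For reference, the helper above yields a message shaped roughly like the
# hedged, illustrative sketch below ('body' carries the JSON-encoded dict):
#   {'body': '{"event_time": [1378740000, 123456],
#              "type": "ganeti-op-status", "status": "success",
#              "jobId": 1, "logmsg": "Dummy Log", ...}'}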
def test_missing_attribute(self, client):
update_db(client, json.dumps({'body': {}}))
self.assertTrue(client.basic_reject.called)
def test_unhandled_exception(self, client):
update_db(client, None)
self.assertEqual(1, client.basic_reject.call_count)
def test_not_json(self, client):
update_db(client, {'body': ''})
self.assertEqual(1, client.basic_nack.call_count)
def test_malformed_json(self, client):
update_db(client, {'body': '{}'})
self.assertEqual(1, client.basic_nack.call_count)
def test_wrong_type(self, client):
msg = self.create_msg(type="WRONG_TYPE")
update_db(client, msg)
self.assertTrue(client.basic_nack.called)
def test_missing_instance(self, client):
msg = self.create_msg(operation='OP_INSTANCE_STARTUP',
instance='foo')
update_db(client, msg)
self.assertTrue(client.basic_ack.called)
def test_no_nics(self, client):
vm = mfactory.VirtualMachineFactory(operstate='ERROR')
mfactory.NetworkInterfaceFactory(machine=vm, state="ACTIVE")
mfactory.NetworkInterfaceFactory(machine=vm, state="ACTIVE")
mfactory.NetworkInterfaceFactory(machine=vm, state="ACTIVE")
self.assertEqual(len(vm.nics.all()), 3)
msg = self.create_msg(instance_nics=[],
instance=vm.backend_vm_id)
update_db(client, msg)
self.assertTrue(client.basic_ack.called)
db_vm = VirtualMachine.objects.get(id=vm.id)
self.assertEqual(len(db_vm.nics.all()), 0)
def test_changed_nic(self, client):
ip = mfactory.IPv4AddressFactory(subnet__cidr="10.0.0.0/24",
address="10.0.0.2")
network = ip.network
subnet = ip.subnet
vm = ip.nic.machine
pool = subnet.get_ip_pools()[0]
pool.reserve("10.0.0.2")
pool.save()
msg = self.create_msg(instance_nics=[{'network': network.backend_id,
'ip': '10.0.0.3',
'mac': 'aa:bb:cc:00:11:22',
'name': ip.nic.backend_uuid}],
instance=vm.backend_vm_id)
update_db(client, msg)
self.assertTrue(client.basic_ack.called)
db_vm = VirtualMachine.objects.get(id=vm.id)
nics = db_vm.nics.all()
self.assertEqual(len(nics), 1)
self.assertEqual(nics[0].index, 0)
self.assertEqual(nics[0].ipv4_address, '10.0.0.3')
self.assertEqual(nics[0].mac, 'aa:bb:cc:00:11:22')
pool = subnet.get_ip_pools()[0]
self.assertTrue(pool.is_available('10.0.0.2'))
self.assertFalse(pool.is_available('10.0.0.3'))
pool.save()
@patch('synnefo.lib.amqp.AMQPClient')
class UpdateNetworkTest(TestCase):
def create_msg(self, **kwargs):
"""Create snf-ganeti-eventd message"""
msg = {'event_time': split_time(time())}
msg['type'] = 'ganeti-network-status'
msg['status'] = 'success'
msg['jobId'] = 1
msg['logmsg'] = 'Dummy Log'
for key, val in kwargs.items():
msg[key] = val
message = {'body': json.dumps(msg)}
return message
def test_missing_attribute(self, client):
update_network(client, json.dumps({'body': {}}))
self.assertTrue(client.basic_reject.called)
def test_unhandled_exception(self, client):
update_network(client, None)
self.assertEqual(1, client.basic_reject.call_count)
def test_not_json(self, client):
update_network(client, {'body': ''})
self.assertEqual(1, client.basic_nack.call_count)
def test_malformed_json(self, client):
update_network(client, {'body': '{}'})
self.assertEqual(1, client.basic_nack.call_count)
def test_wrong_type(self, client):
msg = self.create_msg(type="WRONG_TYPE")
update_network(client, msg)
self.assertTrue(client.basic_nack.called)
def test_missing_network(self, client):
msg = self.create_msg(operation='OP_NETWORK_CREATE',
network='foo')
update_network(client, msg)
self.assertTrue(client.basic_ack.called)
def test_create(self, client):
back_network = mfactory.BackendNetworkFactory(operstate='PENDING')
net = back_network.network
net.state = 'ACTIVE'
net.save()
back1 = back_network.backend
back_network2 = mfactory.BackendNetworkFactory(operstate='PENDING',
network=net)
back2 = back_network2.backend
# Message from first backend network
msg = self.create_msg(operation='OP_NETWORK_CONNECT',
network=net.backend_id,
cluster=back1.clustername)
update_network(client, msg)
self.assertTrue(client.basic_ack.called)
back_net = BackendNetwork.objects.get(id=back_network.id)
self.assertEqual(back_net.operstate, 'ACTIVE')
db_net = Network.objects.get(id=net.id)
self.assertEqual(db_net.state, 'ACTIVE')
# Message from second backend network
msg = self.create_msg(operation='OP_NETWORK_CONNECT',
network=net.backend_id,
cluster=back2.clustername)
update_network(client, msg)
self.assertTrue(client.basic_ack.called)
db_net = Network.objects.get(id=net.id)
self.assertEqual(db_net.state, 'ACTIVE')
back_net = BackendNetwork.objects.get(id=back_network.id)
self.assertEqual(back_net.operstate, 'ACTIVE')
def test_create_offline_backend(self, client):
"""Test network creation when a backend is offline"""
net = mfactory.NetworkFactory(state='ACTIVE')
bn1 = mfactory.BackendNetworkFactory(network=net)
mfactory.BackendNetworkFactory(network=net,
backend__offline=True)
msg = self.create_msg(operation='OP_NETWORK_CONNECT',
network=net.backend_id,
cluster=bn1.backend.clustername)
update_network(client, msg)
self.assertTrue(client.basic_ack.called)
new_net = Network.objects.get(id=net.id)
self.assertEqual(new_net.state, 'ACTIVE')
def test_disconnect(self, client):
bn1 = mfactory.BackendNetworkFactory(operstate='ACTIVE')
net1 = bn1.network
net1.state = "ACTIVE"
net1.state = 'ACTIVE'
net1.save()
bn2 = mfactory.BackendNetworkFactory(operstate='ACTIVE',
network=net1)
msg = self.create_msg(operation='OP_NETWORK_DISCONNECT',
network=net1.backend_id,
cluster=bn2.backend.clustername)
update_network(client, msg)
self.assertTrue(client.basic_ack.called)
self.assertEqual(Network.objects.get(id=net1.id).state, 'ACTIVE')
self.assertEqual(BackendNetwork.objects.get(id=bn2.id).operstate,
'PENDING')
def test_remove(self, client):
mfactory.MacPrefixPoolTableFactory()
mfactory.BridgePoolTableFactory()
bn = mfactory.BackendNetworkFactory(operstate='ACTIVE')
for old_state in ['success', 'canceled', 'error']:
for flavor in Network.FLAVORS.keys():
bn.operstate = old_state
bn.save()
net = bn.network
net.state = 'ACTIVE'
net.flavor = flavor
if flavor == 'PHYSICAL_VLAN':
net.link = allocate_resource('bridge')
if flavor == 'MAC_FILTERED':
net.mac_prefix = allocate_resource('mac_prefix')
net.save()
msg = self.create_msg(operation='OP_NETWORK_REMOVE',
network=net.backend_id,
cluster=bn.backend.clustername)
with mocked_quotaholder():
update_network(client, msg)
self.assertTrue(client.basic_ack.called)
self.assertFalse(BackendNetwork.objects.filter(id=bn.id)
.exists())
db_net = Network.objects.get(id=net.id)
self.assertEqual(db_net.state, 'DELETED', flavor)
self.assertTrue(db_net.deleted)
if flavor == 'PHYSICAL_VLAN':
pool = BridgePoolTable.get_pool()
self.assertTrue(pool.is_available(net.link))
if flavor == 'MAC_FILTERED':
pool = MacPrefixPoolTable.get_pool()
self.assertTrue(pool.is_available(net.mac_prefix))
@patch("synnefo.logic.rapi_pool.GanetiRapiClient")
def test_remove_error(self, rapi, client):
mfactory.MacPrefixPoolTableFactory()
mfactory.BridgePoolTableFactory()
bn = mfactory.BackendNetworkFactory(operstate='ACTIVE')
network = bn.network
msg = self.create_msg(operation='OP_NETWORK_REMOVE',
network=network.backend_id,
status="error",
cluster=bn.backend.clustername)
rapi().GetNetwork.return_value = {}
update_network(client, msg)
bn = BackendNetwork.objects.get(id=bn.id)
self.assertNotEqual(bn.operstate, "DELETED")
rapi().GetNetwork.side_effect = GanetiApiError(msg="foo", code=404)
with mocked_quotaholder():
update_network(client, msg)
self.assertFalse(BackendNetwork.objects.filter(id=bn.id).exists())
def test_remove_offline_backend(self, client):
"""Test network removing when a backend is offline"""
mfactory.BridgePoolTableFactory()
net = mfactory.NetworkFactory(flavor='PHYSICAL_VLAN',
state='ACTIVE',
link='prv12')
bn1 = mfactory.BackendNetworkFactory(network=net)
mfactory.BackendNetworkFactory(network=net,
operstate="ACTIVE",
backend__offline=True)
msg = self.create_msg(operation='OP_NETWORK_REMOVE',
network=net.backend_id,
cluster=bn1.backend.clustername)
with mocked_quotaholder():
update_network(client, msg)
self.assertTrue(client.basic_ack.called)
new_net = Network.objects.get(id=net.id)
self.assertEqual(new_net.state, 'ACTIVE')
self.assertFalse(new_net.deleted)
@patch("synnefo.logic.rapi_pool.GanetiRapiClient")
def test_error_opcode(self, rapi, client):
# Mock network retrieval, because the code will look up whether the
# network exists in the backend
rapi().GetNetwork.return_value = {}
mfactory.MacPrefixPoolTableFactory()
mfactory.BridgePoolTableFactory()
network = mfactory.NetworkFactory()
mfactory.BackendNetworkFactory(network=network,
operstate="ACTIVE")
for state, _ in Network.OPER_STATES:
bn = mfactory.BackendNetworkFactory(operstate="ACTIVE",
network=network)
bn.operstate = state
bn.save()
network = bn.network
network.state = state
network.save()
for opcode, _ in BackendNetwork.BACKEND_OPCODES:
if opcode in ['OP_NETWORK_REMOVE', 'OP_NETWORK_ADD']:
continue
msg = self.create_msg(operation=opcode,
network=bn.network.backend_id,
status='error',
add_reserved_ips=[],
remove_reserved_ips=[],
cluster=bn.backend.clustername)
with mocked_quotaholder():
update_network(client, msg)
self.assertTrue(client.basic_ack.called)
db_bnet = BackendNetwork.objects.get(id=bn.id)
self.assertEqual(bn.operstate, db_bnet.operstate)
self.assertEqual(bn.network.state, db_bnet.network.state)
def test_ips(self, client):
network = mfactory.NetworkWithSubnetFactory(subnet__cidr='10.0.0.0/24',
subnet__gateway="10.0.0.1")
bn = mfactory.BackendNetworkFactory(network=network)
msg = self.create_msg(operation='OP_NETWORK_SET_PARAMS',
network=network.backend_id,
cluster=bn.backend.clustername,
status='success',
job_fields={"add_reserved_ips": ["10.0.0.10",
"10.0.0.20"]})
update_network(client, msg)
self.assertTrue(client.basic_ack.called)
pool = network.get_ip_pools()[0]
self.assertTrue(pool.is_reserved('10.0.0.10'))
self.assertTrue(pool.is_reserved('10.0.0.20'))
pool.save()
# Check that they are not released
msg = self.create_msg(operation='OP_NETWORK_SET_PARAMS',
network=network.backend_id,
cluster=bn.backend.clustername,
job_fields={
"remove_reserved_ips": ["10.0.0.10",
"10.0.0.20"]})
update_network(client, msg)
pool = network.get_ip_pools()[0]
self.assertTrue(pool.is_reserved('10.0.0.10'))
self.assertTrue(pool.is_reserved('10.0.0.20'))
@patch('synnefo.lib.amqp.AMQPClient')
class UpdateBuildProgressTest(TestCase):
def setUp(self):
self.vm = mfactory.VirtualMachineFactory()
def get_db_vm(self):
return VirtualMachine.objects.get(id=self.vm.id)
def create_msg(self, **kwargs):
"""Create snf-progress-monitor message"""
msg = {'event_time': split_time(time())}
msg['type'] = 'image-copy-progress'
msg['progress'] = 0
for key, val in kwargs.items():
msg[key] = val
message = {'body': json.dumps(msg)}
return message
def test_missing_attribute(self, client):
update_build_progress(client, json.dumps({'body': {}}))
self.assertTrue(client.basic_reject.called)
def test_unhandled_exception(self, client):
update_build_progress(client, None)
self.assertEqual(1, client.basic_reject.call_count)
def test_not_json(self, client):
update_build_progress(client, {'body': ''})
self.assertEqual(1, client.basic_nack.call_count)
def test_malformed_json(self, client):
update_build_progress(client, {'body': '{}'})
self.assertEqual(1, client.basic_nack.call_count)
def test_missing_instance(self, client):
msg = self.create_msg(instance='foo')
update_build_progress(client, msg)
self.assertTrue(client.basic_ack.called)
def test_wrong_type(self, client):
msg = self.create_msg(type="WRONG_TYPE")
update_build_progress(client, msg)
self.assertTrue(client.basic_nack.called)
def test_progress_update(self, client):
rprogress = randint(10, 100)
msg = self.create_msg(progress=rprogress,
instance=self.vm.backend_vm_id)
update_build_progress(client, msg)
self.assertTrue(client.basic_ack.called)
vm = self.get_db_vm()
self.assertEqual(vm.buildpercentage, rprogress)
def test_invalid_value(self, client):
old = self.vm.buildpercentage
for rprogress in [0, -1, 'a']:
msg = self.create_msg(progress=rprogress,
instance=self.vm.backend_vm_id)
update_build_progress(client, msg)
self.assertTrue(client.basic_ack.called)
vm = self.get_db_vm()
self.assertEqual(vm.buildpercentage, old)
| gpl-3.0 |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.1/Lib/encodings/mac_centeuro.py | 257 | 14102 | """ Python Character Mapping Codec mac_centeuro generated from 'MAPPINGS/VENDORS/APPLE/CENTEURO.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-centeuro',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> CONTROL CHARACTER
'\x01' # 0x01 -> CONTROL CHARACTER
'\x02' # 0x02 -> CONTROL CHARACTER
'\x03' # 0x03 -> CONTROL CHARACTER
'\x04' # 0x04 -> CONTROL CHARACTER
'\x05' # 0x05 -> CONTROL CHARACTER
'\x06' # 0x06 -> CONTROL CHARACTER
'\x07' # 0x07 -> CONTROL CHARACTER
'\x08' # 0x08 -> CONTROL CHARACTER
'\t' # 0x09 -> CONTROL CHARACTER
'\n' # 0x0A -> CONTROL CHARACTER
'\x0b' # 0x0B -> CONTROL CHARACTER
'\x0c' # 0x0C -> CONTROL CHARACTER
'\r' # 0x0D -> CONTROL CHARACTER
'\x0e' # 0x0E -> CONTROL CHARACTER
'\x0f' # 0x0F -> CONTROL CHARACTER
'\x10' # 0x10 -> CONTROL CHARACTER
'\x11' # 0x11 -> CONTROL CHARACTER
'\x12' # 0x12 -> CONTROL CHARACTER
'\x13' # 0x13 -> CONTROL CHARACTER
'\x14' # 0x14 -> CONTROL CHARACTER
'\x15' # 0x15 -> CONTROL CHARACTER
'\x16' # 0x16 -> CONTROL CHARACTER
'\x17' # 0x17 -> CONTROL CHARACTER
'\x18' # 0x18 -> CONTROL CHARACTER
'\x19' # 0x19 -> CONTROL CHARACTER
'\x1a' # 0x1A -> CONTROL CHARACTER
'\x1b' # 0x1B -> CONTROL CHARACTER
'\x1c' # 0x1C -> CONTROL CHARACTER
'\x1d' # 0x1D -> CONTROL CHARACTER
'\x1e' # 0x1E -> CONTROL CHARACTER
'\x1f' # 0x1F -> CONTROL CHARACTER
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> CONTROL CHARACTER
'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\u0100' # 0x81 -> LATIN CAPITAL LETTER A WITH MACRON
'\u0101' # 0x82 -> LATIN SMALL LETTER A WITH MACRON
'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0104' # 0x84 -> LATIN CAPITAL LETTER A WITH OGONEK
'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
'\u0105' # 0x88 -> LATIN SMALL LETTER A WITH OGONEK
'\u010c' # 0x89 -> LATIN CAPITAL LETTER C WITH CARON
'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
'\u010d' # 0x8B -> LATIN SMALL LETTER C WITH CARON
'\u0106' # 0x8C -> LATIN CAPITAL LETTER C WITH ACUTE
'\u0107' # 0x8D -> LATIN SMALL LETTER C WITH ACUTE
'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
'\u0179' # 0x8F -> LATIN CAPITAL LETTER Z WITH ACUTE
'\u017a' # 0x90 -> LATIN SMALL LETTER Z WITH ACUTE
'\u010e' # 0x91 -> LATIN CAPITAL LETTER D WITH CARON
'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
'\u010f' # 0x93 -> LATIN SMALL LETTER D WITH CARON
'\u0112' # 0x94 -> LATIN CAPITAL LETTER E WITH MACRON
'\u0113' # 0x95 -> LATIN SMALL LETTER E WITH MACRON
'\u0116' # 0x96 -> LATIN CAPITAL LETTER E WITH DOT ABOVE
'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
'\u0117' # 0x98 -> LATIN SMALL LETTER E WITH DOT ABOVE
'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
'\u011a' # 0x9D -> LATIN CAPITAL LETTER E WITH CARON
'\u011b' # 0x9E -> LATIN SMALL LETTER E WITH CARON
'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
'\u2020' # 0xA0 -> DAGGER
'\xb0' # 0xA1 -> DEGREE SIGN
'\u0118' # 0xA2 -> LATIN CAPITAL LETTER E WITH OGONEK
'\xa3' # 0xA3 -> POUND SIGN
'\xa7' # 0xA4 -> SECTION SIGN
'\u2022' # 0xA5 -> BULLET
'\xb6' # 0xA6 -> PILCROW SIGN
'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
'\xae' # 0xA8 -> REGISTERED SIGN
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\u2122' # 0xAA -> TRADE MARK SIGN
'\u0119' # 0xAB -> LATIN SMALL LETTER E WITH OGONEK
'\xa8' # 0xAC -> DIAERESIS
'\u2260' # 0xAD -> NOT EQUAL TO
'\u0123' # 0xAE -> LATIN SMALL LETTER G WITH CEDILLA
'\u012e' # 0xAF -> LATIN CAPITAL LETTER I WITH OGONEK
'\u012f' # 0xB0 -> LATIN SMALL LETTER I WITH OGONEK
'\u012a' # 0xB1 -> LATIN CAPITAL LETTER I WITH MACRON
'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
'\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
'\u0136' # 0xB5 -> LATIN CAPITAL LETTER K WITH CEDILLA
'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
'\u2211' # 0xB7 -> N-ARY SUMMATION
'\u0142' # 0xB8 -> LATIN SMALL LETTER L WITH STROKE
'\u013b' # 0xB9 -> LATIN CAPITAL LETTER L WITH CEDILLA
'\u013c' # 0xBA -> LATIN SMALL LETTER L WITH CEDILLA
'\u013d' # 0xBB -> LATIN CAPITAL LETTER L WITH CARON
'\u013e' # 0xBC -> LATIN SMALL LETTER L WITH CARON
'\u0139' # 0xBD -> LATIN CAPITAL LETTER L WITH ACUTE
'\u013a' # 0xBE -> LATIN SMALL LETTER L WITH ACUTE
'\u0145' # 0xBF -> LATIN CAPITAL LETTER N WITH CEDILLA
'\u0146' # 0xC0 -> LATIN SMALL LETTER N WITH CEDILLA
'\u0143' # 0xC1 -> LATIN CAPITAL LETTER N WITH ACUTE
'\xac' # 0xC2 -> NOT SIGN
'\u221a' # 0xC3 -> SQUARE ROOT
'\u0144' # 0xC4 -> LATIN SMALL LETTER N WITH ACUTE
'\u0147' # 0xC5 -> LATIN CAPITAL LETTER N WITH CARON
'\u2206' # 0xC6 -> INCREMENT
'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
'\xa0' # 0xCA -> NO-BREAK SPACE
'\u0148' # 0xCB -> LATIN SMALL LETTER N WITH CARON
'\u0150' # 0xCC -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
'\u0151' # 0xCE -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
'\u014c' # 0xCF -> LATIN CAPITAL LETTER O WITH MACRON
'\u2013' # 0xD0 -> EN DASH
'\u2014' # 0xD1 -> EM DASH
'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
'\xf7' # 0xD6 -> DIVISION SIGN
'\u25ca' # 0xD7 -> LOZENGE
'\u014d' # 0xD8 -> LATIN SMALL LETTER O WITH MACRON
'\u0154' # 0xD9 -> LATIN CAPITAL LETTER R WITH ACUTE
'\u0155' # 0xDA -> LATIN SMALL LETTER R WITH ACUTE
'\u0158' # 0xDB -> LATIN CAPITAL LETTER R WITH CARON
'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\u0159' # 0xDE -> LATIN SMALL LETTER R WITH CARON
'\u0156' # 0xDF -> LATIN CAPITAL LETTER R WITH CEDILLA
'\u0157' # 0xE0 -> LATIN SMALL LETTER R WITH CEDILLA
'\u0160' # 0xE1 -> LATIN CAPITAL LETTER S WITH CARON
'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
'\u0161' # 0xE4 -> LATIN SMALL LETTER S WITH CARON
'\u015a' # 0xE5 -> LATIN CAPITAL LETTER S WITH ACUTE
'\u015b' # 0xE6 -> LATIN SMALL LETTER S WITH ACUTE
'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
'\u0164' # 0xE8 -> LATIN CAPITAL LETTER T WITH CARON
'\u0165' # 0xE9 -> LATIN SMALL LETTER T WITH CARON
'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
'\u017d' # 0xEB -> LATIN CAPITAL LETTER Z WITH CARON
'\u017e' # 0xEC -> LATIN SMALL LETTER Z WITH CARON
'\u016a' # 0xED -> LATIN CAPITAL LETTER U WITH MACRON
'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\u016b' # 0xF0 -> LATIN SMALL LETTER U WITH MACRON
'\u016e' # 0xF1 -> LATIN CAPITAL LETTER U WITH RING ABOVE
'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
'\u016f' # 0xF3 -> LATIN SMALL LETTER U WITH RING ABOVE
'\u0170' # 0xF4 -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
'\u0171' # 0xF5 -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
'\u0172' # 0xF6 -> LATIN CAPITAL LETTER U WITH OGONEK
'\u0173' # 0xF7 -> LATIN SMALL LETTER U WITH OGONEK
'\xdd' # 0xF8 -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xfd' # 0xF9 -> LATIN SMALL LETTER Y WITH ACUTE
'\u0137' # 0xFA -> LATIN SMALL LETTER K WITH CEDILLA
'\u017b' # 0xFB -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\u0141' # 0xFC -> LATIN CAPITAL LETTER L WITH STROKE
'\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE
'\u0122' # 0xFE -> LATIN CAPITAL LETTER G WITH CEDILLA
'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
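# Hedged usage sketch (not part of the generated module): the CodecInfo
# returned by getregentry() can be exercised directly; per the table above,
# byte 0x88 maps to U+0105.
#   info = getregentry()
#   data, _ = info.encode('\u0105')     # -> b'\x88'
#   text, _ = info.decode(data)         # -> '\u0105'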
| mit |
botify-labs/moto | tests/test_swf/responses/test_workflow_types.py | 12 | 5239 | import sure
import boto
from moto import mock_swf_deprecated
from boto.swf.exceptions import SWFResponseError
# RegisterWorkflowType endpoint
@mock_swf_deprecated
def test_register_workflow_type():
conn = boto.connect_swf("the_key", "the_secret")
conn.register_domain("test-domain", "60")
conn.register_workflow_type("test-domain", "test-workflow", "v1.0")
types = conn.list_workflow_types("test-domain", "REGISTERED")
actype = types["typeInfos"][0]
actype["workflowType"]["name"].should.equal("test-workflow")
actype["workflowType"]["version"].should.equal("v1.0")
@mock_swf_deprecated
def test_register_already_existing_workflow_type():
conn = boto.connect_swf("the_key", "the_secret")
conn.register_domain("test-domain", "60")
conn.register_workflow_type("test-domain", "test-workflow", "v1.0")
conn.register_workflow_type.when.called_with(
"test-domain", "test-workflow", "v1.0"
).should.throw(SWFResponseError)
@mock_swf_deprecated
def test_register_with_wrong_parameter_type():
conn = boto.connect_swf("the_key", "the_secret")
conn.register_domain("test-domain", "60")
conn.register_workflow_type.when.called_with(
"test-domain", "test-workflow", 12
).should.throw(SWFResponseError)
# ListWorkflowTypes endpoint
@mock_swf_deprecated
def test_list_workflow_types():
conn = boto.connect_swf("the_key", "the_secret")
conn.register_domain("test-domain", "60")
conn.register_workflow_type("test-domain", "b-test-workflow", "v1.0")
conn.register_workflow_type("test-domain", "a-test-workflow", "v1.0")
conn.register_workflow_type("test-domain", "c-test-workflow", "v1.0")
all_workflow_types = conn.list_workflow_types("test-domain", "REGISTERED")
names = [workflow_type["workflowType"]["name"]
for workflow_type in all_workflow_types["typeInfos"]]
names.should.equal(
["a-test-workflow", "b-test-workflow", "c-test-workflow"])
@mock_swf_deprecated
def test_list_workflow_types_reverse_order():
conn = boto.connect_swf("the_key", "the_secret")
conn.register_domain("test-domain", "60")
conn.register_workflow_type("test-domain", "b-test-workflow", "v1.0")
conn.register_workflow_type("test-domain", "a-test-workflow", "v1.0")
conn.register_workflow_type("test-domain", "c-test-workflow", "v1.0")
all_workflow_types = conn.list_workflow_types("test-domain", "REGISTERED",
reverse_order=True)
names = [workflow_type["workflowType"]["name"]
for workflow_type in all_workflow_types["typeInfos"]]
names.should.equal(
["c-test-workflow", "b-test-workflow", "a-test-workflow"])
# DeprecateWorkflowType endpoint
@mock_swf_deprecated
def test_deprecate_workflow_type():
conn = boto.connect_swf("the_key", "the_secret")
conn.register_domain("test-domain", "60")
conn.register_workflow_type("test-domain", "test-workflow", "v1.0")
conn.deprecate_workflow_type("test-domain", "test-workflow", "v1.0")
actypes = conn.list_workflow_types("test-domain", "DEPRECATED")
actype = actypes["typeInfos"][0]
actype["workflowType"]["name"].should.equal("test-workflow")
actype["workflowType"]["version"].should.equal("v1.0")
@mock_swf_deprecated
def test_deprecate_already_deprecated_workflow_type():
conn = boto.connect_swf("the_key", "the_secret")
conn.register_domain("test-domain", "60")
conn.register_workflow_type("test-domain", "test-workflow", "v1.0")
conn.deprecate_workflow_type("test-domain", "test-workflow", "v1.0")
conn.deprecate_workflow_type.when.called_with(
"test-domain", "test-workflow", "v1.0"
).should.throw(SWFResponseError)
@mock_swf_deprecated
def test_deprecate_non_existent_workflow_type():
conn = boto.connect_swf("the_key", "the_secret")
conn.register_domain("test-domain", "60")
conn.deprecate_workflow_type.when.called_with(
"test-domain", "non-existent", "v1.0"
).should.throw(SWFResponseError)
# DescribeWorkflowType endpoint
@mock_swf_deprecated
def test_describe_workflow_type():
conn = boto.connect_swf("the_key", "the_secret")
conn.register_domain("test-domain", "60")
conn.register_workflow_type("test-domain", "test-workflow", "v1.0",
task_list="foo", default_child_policy="TERMINATE")
actype = conn.describe_workflow_type(
"test-domain", "test-workflow", "v1.0")
actype["configuration"]["defaultTaskList"]["name"].should.equal("foo")
actype["configuration"]["defaultChildPolicy"].should.equal("TERMINATE")
actype["configuration"].keys().should_not.contain(
"defaultTaskStartToCloseTimeout")
infos = actype["typeInfo"]
infos["workflowType"]["name"].should.equal("test-workflow")
infos["workflowType"]["version"].should.equal("v1.0")
infos["status"].should.equal("REGISTERED")
@mock_swf_deprecated
def test_describe_non_existent_workflow_type():
conn = boto.connect_swf("the_key", "the_secret")
conn.register_domain("test-domain", "60")
conn.describe_workflow_type.when.called_with(
"test-domain", "non-existent", "v1.0"
).should.throw(SWFResponseError)
| apache-2.0 |
cclauss/stash | bin/xargs.py | 2 | 1537 | """ Construct argument list(s) and execute utility
"""
import os
import sys
import argparse
_stash = globals()['_stash']
def main(args):
ap = argparse.ArgumentParser()
ap.add_argument('-n',
nargs='?',
metavar='number',
type=int,
help='maximum number of arguments taken from standard input for each invocation of utility')
ap.add_argument('-I',
dest='replstr',
nargs='?',
help='replacement string')
ap.add_argument('utility',
nargs='?',
default='echo',
help='utility to invoke')
ap.add_argument('args_to_pass',
metavar='arguments',
nargs=argparse.REMAINDER,
help='arguments to the utility')
ns = ap.parse_args(args)
lines = [line.strip() for line in sys.stdin.readlines()]
n = ns.n if ns.n else len(lines)
if ns.replstr:
n = 1
while lines:
rest = ' '.join(lines[:n])
lines = lines[n:]
args_to_pass = ' '.join(ns.args_to_pass)
if rest.strip():
if ns.replstr:
args_to_pass = args_to_pass.replace(ns.replstr, rest)
rest = ''
cmdline = '%s %s %s' % (ns.utility,
args_to_pass,
rest)
_stash(cmdline)
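# Hedged StaSh usage sketch (file names are illustrative):
#   ls *.txt | xargs -n 2 echo     # at most two arguments per echo invocation
#   ls *.log | xargs -I {} rm {}   # substitute each input line for {}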
if __name__ == "__main__":
main(sys.argv[1:]) | mit |
lmazuel/azure-sdk-for-python | azure-mgmt-recoveryservices/azure/mgmt/recoveryservices/models/resource_certificate_and_aad_details.py | 2 | 4137 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource_certificate_details import ResourceCertificateDetails
class ResourceCertificateAndAadDetails(ResourceCertificateDetails):
"""Certificate details representing the Vault credentials for AAD.
:param certificate: The base64 encoded certificate raw data string.
:type certificate: bytearray
:param friendly_name: Certificate friendlyname.
:type friendly_name: str
:param issuer: Certificate issuer.
:type issuer: str
:param resource_id: Resource ID of the vault.
:type resource_id: long
:param subject: Certificate Subject Name.
:type subject: str
:param thumbprint: Certificate thumbprint.
:type thumbprint: str
:param valid_from: Certificate Validity start Date time.
:type valid_from: datetime
:param valid_to: Certificate Validity End Date time.
:type valid_to: datetime
:param auth_type: Polymorphic Discriminator
:type auth_type: str
:param aad_authority: AAD tenant authority.
:type aad_authority: str
:param aad_tenant_id: AAD tenant Id.
:type aad_tenant_id: str
:param service_principal_client_id: AAD service principal clientId.
:type service_principal_client_id: str
:param service_principal_object_id: AAD service principal ObjectId.
:type service_principal_object_id: str
:param azure_management_endpoint_audience: Azure Management Endpoint
Audience.
:type azure_management_endpoint_audience: str
"""
_validation = {
'auth_type': {'required': True},
'aad_authority': {'required': True},
'aad_tenant_id': {'required': True},
'service_principal_client_id': {'required': True},
'service_principal_object_id': {'required': True},
'azure_management_endpoint_audience': {'required': True},
}
_attribute_map = {
'certificate': {'key': 'certificate', 'type': 'bytearray'},
'friendly_name': {'key': 'friendlyName', 'type': 'str'},
'issuer': {'key': 'issuer', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'long'},
'subject': {'key': 'subject', 'type': 'str'},
'thumbprint': {'key': 'thumbprint', 'type': 'str'},
'valid_from': {'key': 'validFrom', 'type': 'iso-8601'},
'valid_to': {'key': 'validTo', 'type': 'iso-8601'},
'auth_type': {'key': 'authType', 'type': 'str'},
'aad_authority': {'key': 'aadAuthority', 'type': 'str'},
'aad_tenant_id': {'key': 'aadTenantId', 'type': 'str'},
'service_principal_client_id': {'key': 'servicePrincipalClientId', 'type': 'str'},
'service_principal_object_id': {'key': 'servicePrincipalObjectId', 'type': 'str'},
'azure_management_endpoint_audience': {'key': 'azureManagementEndpointAudience', 'type': 'str'},
}
def __init__(self, aad_authority, aad_tenant_id, service_principal_client_id, service_principal_object_id, azure_management_endpoint_audience, certificate=None, friendly_name=None, issuer=None, resource_id=None, subject=None, thumbprint=None, valid_from=None, valid_to=None):
super(ResourceCertificateAndAadDetails, self).__init__(certificate=certificate, friendly_name=friendly_name, issuer=issuer, resource_id=resource_id, subject=subject, thumbprint=thumbprint, valid_from=valid_from, valid_to=valid_to)
self.aad_authority = aad_authority
self.aad_tenant_id = aad_tenant_id
self.service_principal_client_id = service_principal_client_id
self.service_principal_object_id = service_principal_object_id
self.azure_management_endpoint_audience = azure_management_endpoint_audience
self.auth_type = 'AzureActiveDirectory'
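# Hedged construction sketch (every value below is a placeholder, not a real
# tenant, principal or endpoint):
#   details = ResourceCertificateAndAadDetails(
#       aad_authority='https://login.example.com',
#       aad_tenant_id='00000000-0000-0000-0000-000000000000',
#       service_principal_client_id='client-id-placeholder',
#       service_principal_object_id='object-id-placeholder',
#       azure_management_endpoint_audience='audience-placeholder')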
| mit |
indextbag/arsenalsuite | python/dist_Changes/__init__.py | 10 | 21144 | # This module exists to create the "best" dispatch object for a given
# object. If "makepy" support for a given object is detected, it is
# used, otherwise a dynamic dispatch object.
# Note that if the unknown dispatch object then returns a known
# dispatch object, the known class will be used. This contrasts
# with dynamic.Dispatch behaviour, where dynamic objects are always used.
import __builtin__
# For some bizarre reason, __builtins__ fails with attribute error on __dict__ here?
NeedUnicodeConversions = not hasattr(__builtin__, "unicode")
import dynamic, gencache, pythoncom
import sys
import pywintypes
from types import TupleType
from pywintypes import UnicodeType
_PyIDispatchType = pythoncom.TypeIIDs[pythoncom.IID_IDispatch]
def __WrapDispatch(dispatch, userName = None, resultCLSID = None, typeinfo = None, \
UnicodeToString = NeedUnicodeConversions, clsctx = pythoncom.CLSCTX_SERVER,
WrapperClass = None):
"""
Helper function to return a makepy generated class for a CLSID if it exists,
otherwise cope by using CDispatch.
"""
# if resultCLSID is None:
# try:
# typeinfo = dispatch.GetTypeInfo()
# if typeinfo is not None: # Some objects return NULL, some raise exceptions...
# resultCLSID = str(typeinfo.GetTypeAttr()[0])
# except pythoncom.com_error:
# pass
# if resultCLSID is not None:
# import gencache
# # Attempt to load generated module support
# # This may load the module, and make it available
# klass = gencache.GetClassForCLSID(resultCLSID)
# if klass is not None:
# return klass(dispatch)
#
# # Return a "dynamic" object - best we can do!
# if WrapperClass is None: WrapperClass = CDispatch
return dynamic.Dispatch(dispatch, userName, WrapperClass, typeinfo, UnicodeToString=UnicodeToString,clsctx=clsctx)
def GetObject(Pathname = None, Class = None, clsctx = None):
"""
Mimic VB's GetObject() function.
ob = GetObject(Class = "ProgID") or GetObject(Class = clsid) will
connect to an already running instance of the COM object.
ob = GetObject(r"c:\blah\blah\foo.xls") (aka the COM moniker syntax)
will return a ready to use Python wrapping of the required COM object.
Note: You must specifiy one or the other of these arguments. I know
this isn't pretty, but it is what VB does. Blech. If you don't
I'll throw ValueError at you. :)
This will most likely throw pythoncom.com_error if anything fails.
"""
if clsctx is None:
clsctx = pythoncom.CLSCTX_ALL
if (Pathname is None and Class is None) or \
(Pathname is not None and Class is not None):
raise ValueError, "You must specify a value for Pathname or Class, but not both."
if Class is not None:
return GetActiveObject(Class, clsctx)
else:
return Moniker(Pathname, clsctx)
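# Hedged usage sketch (the ProgID and path are illustrative):
#   app = GetObject(Class="Excel.Application")  # attach to a running instance
#   doc = GetObject(r"c:\temp\book1.xls")       # bind through a file moniker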
def GetActiveObject(Class, clsctx = pythoncom.CLSCTX_ALL):
"""
Python friendly version of GetObject's ProgID/CLSID functionality.
"""
resultCLSID = pywintypes.IID(Class)
dispatch = pythoncom.GetActiveObject(resultCLSID)
dispatch = dispatch.QueryInterface(pythoncom.IID_IDispatch)
return __WrapDispatch(dispatch, Class, resultCLSID = resultCLSID, clsctx = clsctx)
def Moniker(Pathname, clsctx = pythoncom.CLSCTX_ALL):
"""
Python friendly version of GetObject's moniker functionality.
"""
moniker, i, bindCtx = pythoncom.MkParseDisplayName(Pathname)
dispatch = moniker.BindToObject(bindCtx, None, pythoncom.IID_IDispatch)
return __WrapDispatch(dispatch, Pathname, clsctx = clsctx)
def Dispatch(dispatch, userName = None, resultCLSID = None, typeinfo = None, UnicodeToString=NeedUnicodeConversions, clsctx = pythoncom.CLSCTX_SERVER):
"""Creates a Dispatch based COM object.
"""
dispatch, userName = dynamic._GetGoodDispatchAndUserName(dispatch,userName,clsctx)
return __WrapDispatch(dispatch, userName, resultCLSID, typeinfo, UnicodeToString, clsctx)
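# Hedged usage sketch (the ProgID is illustrative):
#   word = Dispatch("Word.Application")
#   word.Visible = 1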
def DispatchEx(clsid, machine=None, userName = None, resultCLSID = None, typeinfo = None, UnicodeToString=NeedUnicodeConversions, clsctx = None):
"""Creates a Dispatch based COM object on a specific machine.
"""
# If InProc is registered, DCOM will use it regardless of the machine name
# (and regardless of the DCOM config for the object.) So unless the user
# specifies otherwise, we exclude inproc apps when a remote machine is used.
if clsctx is None:
clsctx = pythoncom.CLSCTX_SERVER
if machine is not None: clsctx = clsctx & ~pythoncom.CLSCTX_INPROC
if machine is None:
serverInfo = None
else:
serverInfo = (machine,)
if userName is None: userName = clsid
dispatch = pythoncom.CoCreateInstanceEx(clsid, None, clsctx, serverInfo, (pythoncom.IID_IDispatch,))[0]
return Dispatch(dispatch, userName, resultCLSID, typeinfo, UnicodeToString=UnicodeToString, clsctx=clsctx)
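# Hedged usage sketch (the machine name is a placeholder):
#   remote = DispatchEx("Word.Application", machine="SOMEHOST")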
class CDispatch(dynamic.CDispatch):
"""
The dynamic class used as a last resort.
The purpose of this overriding of dynamic.CDispatch is to perpetuate the policy
of using the makepy generated wrapper Python class instead of dynamic.CDispatch
if/when possible.
"""
def _wrap_dispatch_(self, ob, userName = None, returnCLSID = None, UnicodeToString = NeedUnicodeConversions):
return Dispatch(ob, userName, returnCLSID,None,UnicodeToString)
def CastTo(ob, target):
"""'Cast' a COM object to another interface"""
# todo - should support target being an IID
if hasattr(target, "index"): # string like
# for now, we assume makepy for this to work.
if not ob.__class__.__dict__.has_key("CLSID"):
# Eeek - no makepy support - try and build it.
ob = gencache.EnsureDispatch(ob)
if not ob.__class__.__dict__.has_key("CLSID"):
raise ValueError, "Must be a makepy-able object for this to work"
clsid = ob.CLSID
# Lots of hoops to support "demand-build" - ie, generating
# code for an interface first time it is used. We assume the
# interface name exists in the same library as the object.
# This is generally the case - only referenced typelibs may be
# a problem, and we can handle that later. Maybe <wink>
# So get the generated module for the library itself, then
# find the interface CLSID there.
mod = gencache.GetModuleForCLSID(clsid)
# Get the 'root' module.
mod = gencache.GetModuleForTypelib(mod.CLSID, mod.LCID,
mod.MajorVersion, mod.MinorVersion)
# Find the CLSID of the target
target_clsid = mod.NamesToIIDMap.get(target)
if target_clsid is None:
raise ValueError, "The interface name '%s' does not appear in the " \
"same library as object '%r'" % (target, ob)
mod = gencache.GetModuleForCLSID(target_clsid)
target_class = getattr(mod, target)
# resolve coclass to interface
target_class = getattr(target_class, "default_interface", target_class)
return target_class(ob) # auto QI magic happens
raise ValueError, "This object can not be cast"
class Constants:
"""A container for generated COM constants.
"""
def __init__(self):
self.__dicts__ = [] # A list of dictionaries
def __getattr__(self, a):
for d in self.__dicts__:
if d.has_key(a):
return d[a]
raise AttributeError, a
# And create an instance.
constants = Constants()
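# Hedged usage sketch: once makepy support is loaded, generated constants
# resolve through this container (assumes Excel's type library is available):
#   from win32com.client import gencache, constants
#   gencache.EnsureDispatch("Excel.Application")
#   constants.xlUp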
# A helper for DispatchWithEvents - this becomes __setattr__ for the
# temporary class.
def _event_setattr_(self, attr, val):
try:
# Does the COM object have an attribute of this name?
self.__class__.__bases__[0].__setattr__(self, attr, val)
except AttributeError:
# Otherwise just stash it away in the instance.
self.__dict__[attr] = val
# An instance of this "proxy" is created to break the COM circular references
# that exist (ie, when we connect to the COM events, COM keeps a reference
# to the object. Thus, the Event connection must be manually broken before
# our object can die. This solves the problem by manually breaking the connection
# to the real object as the proxy dies.
class EventsProxy:
def __init__(self, ob):
self.__dict__['_obj_'] = ob
def __del__(self):
try:
# If there is a COM error on disconnection we should
# just ignore it - object probably already shut down...
self._obj_.close()
except pythoncom.com_error:
pass
def __getattr__(self, attr):
return getattr(self._obj_, attr)
def __setattr__(self, attr, val):
setattr(self._obj_, attr, val)
def DispatchWithEvents(clsid, user_event_class):
"""Create a COM object that can fire events to a user defined class.
clsid -- The ProgID or CLSID of the object to create.
user_event_class -- A Python class object that responds to the events.
This requires makepy support for the COM object being created. If
this support does not exist it will be automatically generated by
this function. If the object does not support makepy, a TypeError
exception will be raised.
The result is a class instance that both represents the COM object
and handles events from the COM object.
It is important to note that the returned instance is not a direct
instance of the user_event_class, but an instance of a temporary
class object that derives from three classes:
* The makepy generated class for the COM object
* The makepy generated class for the COM events
* The user_event_class as passed to this function.
If this is not suitable, see the getevents function for an alternative
technique of handling events.
Object Lifetimes: Whenever the object returned from this function is
cleaned-up by Python, the events will be disconnected from
the COM object. This is almost always what should happen,
but see the documentation for getevents() for more details.
Example:
>>> class IEEvents:
... def OnVisible(self, visible):
... print "Visible changed:", visible
...
>>> ie = DispatchWithEvents("InternetExplorer.Application", IEEvents)
>>> ie.Visible = 1
Visible changed: 1
>>>
"""
# Create/Get the object.
disp = Dispatch(clsid)
if not disp.__class__.__dict__.get("CLSID"): # Eeek - no makepy support - try and build it.
try:
ti = disp._oleobj_.GetTypeInfo()
disp_clsid = ti.GetTypeAttr()[0]
tlb, index = ti.GetContainingTypeLib()
tla = tlb.GetLibAttr()
gencache.EnsureModule(tla[0], tla[1], tla[3], tla[4], bValidateFile=0)
# Get the class from the module.
disp_class = gencache.GetClassForProgID(str(disp_clsid))
except pythoncom.com_error:
raise TypeError, "This COM object can not automate the makepy process - please run makepy manually for this object"
else:
disp_class = disp.__class__
# If the clsid was an object, get the clsid
clsid = disp_class.CLSID
# Create a new class that derives from 3 classes - the dispatch class, the event sink class and the user class.
import new
events_class = getevents(clsid)
if events_class is None:
raise ValueError, "This COM object does not support events."
result_class = new.classobj("COMEventClass", (disp_class, events_class, user_event_class), {"__setattr__" : _event_setattr_})
instance = result_class(disp._oleobj_) # This only calls the first base class __init__.
events_class.__init__(instance, instance)
if hasattr(user_event_class, "__init__"):
user_event_class.__init__(instance)
return EventsProxy(instance)
def WithEvents(disp, user_event_class):
"""Similar to DispatchWithEvents - except that the returned
object is *not* also usable as the original Dispatch object - that is
the returned object is not dispatchable.
The difference is best summarised by example.
>>> class IEEvents:
... def OnVisible(self, visible):
... print "Visible changed:", visible
...
>>> ie = Dispatch("InternetExplorer.Application")
>>> ie_events = WithEvents(ie, IEEvents)
>>> ie.Visible = 1
Visible changed: 1
Compare with the code sample for DispatchWithEvents, where you get a
single object that is both the interface and the event handler. Note that
the event handler instance will *not* be able to use 'self.' to refer to
IE's methods and properties.
This is mainly useful where using DispatchWithEvents causes
circular reference problems that the simple proxy doesn't deal with
"""
disp = Dispatch(disp)
if not disp.__class__.__dict__.get("CLSID"): # Eeek - no makepy support - try and build it.
try:
ti = disp._oleobj_.GetTypeInfo()
disp_clsid = ti.GetTypeAttr()[0]
tlb, index = ti.GetContainingTypeLib()
tla = tlb.GetLibAttr()
gencache.EnsureModule(tla[0], tla[1], tla[3], tla[4], bValidateFile=0)
# Get the class from the module.
disp_class = gencache.GetClassForProgID(str(disp_clsid))
except pythoncom.com_error:
raise TypeError, "This COM object can not automate the makepy process - please run makepy manually for this object"
else:
disp_class = disp.__class__
# Get the clsid
clsid = disp_class.CLSID
# Create a new class that derives from 2 classes - the event sink
# class and the user class.
import new
events_class = getevents(clsid)
if events_class is None:
raise ValueError, "This COM object does not support events."
result_class = new.classobj("COMEventClass", (events_class, user_event_class), {})
instance = result_class(disp) # This only calls the first base class __init__.
if hasattr(user_event_class, "__init__"):
user_event_class.__init__(instance)
return instance
def getevents(clsid):
"""Determine the default outgoing interface for a class, given
either a clsid or progid. It returns a class - you can
conveniently derive your own handler from this class and implement
the appropriate methods.
This method relies on the classes produced by makepy. You must use
either makepy or the gencache module to ensure that the
appropriate support classes have been generated for the com server
that you will be handling events from.
Beware of COM circular references. When the Events class is connected
to the COM object, the COM object itself keeps a reference to the Python
events class. Thus, neither the Events instance or the COM object will
ever die by themselves. The 'close' method on the events instance
must be called to break this chain and allow standard Python collection
rules to manage object lifetimes. Note that DispatchWithEvents() does
work around this problem by the use of a proxy object, but if you use
the getevents() function yourself, you must make your own arrangements
to manage this circular reference issue.
Beware of creating Python circular references: this will happen if your
handler has a reference to an object that has a reference back to
the event source. Call the 'close' method to break the chain.
Example:
>>>win32com.client.gencache.EnsureModule('{EAB22AC0-30C1-11CF-A7EB-0000C05BAE0B}',0,1,1)
<module 'win32com.gen_py.....
>>>
>>> class InternetExplorerEvents(win32com.client.getevents("InternetExplorer.Application.1")):
... def OnVisible(self, Visible):
... print "Visibility changed: ", Visible
...
>>>
>>> ie=win32com.client.Dispatch("InternetExplorer.Application.1")
>>> events=InternetExplorerEvents(ie)
>>> ie.Visible=1
Visibility changed: 1
>>>
"""
# find clsid given progid or clsid
clsid=str(pywintypes.IID(clsid))
# return default outgoing interface for that class
klass = gencache.GetClassForCLSID(clsid)
try:
return klass.default_source
except AttributeError:
# See if we have a coclass for the interfaces.
try:
return gencache.GetClassForCLSID(klass.coclass_clsid).default_source
except AttributeError:
return None
# A Record object, as used by the COM struct support
def Record(name, object):
"""Creates a new record object, given the name of the record,
and an object from the same type library.
Example usage would be:
app = win32com.client.Dispatch("Some.Application")
point = win32com.client.Record("SomeAppPoint", app)
point.x = 0
point.y = 0
app.MoveTo(point)
"""
# XXX - to do - probably should allow "object" to already be a module object.
import gencache
object = gencache.EnsureDispatch(object)
module = sys.modules[object.__class__.__module__]
# to allow us to work correctly with "demand generated" code,
# we must use the typelib CLSID to obtain the module
# (otherwise we get the sub-module for the object, which
# does not hold the records)
# thus, package may be module, or may be module's parent if demand generated.
package = gencache.GetModuleForTypelib(module.CLSID, module.LCID, module.MajorVersion, module.MinorVersion)
try:
struct_guid = package.RecordMap[name]
except KeyError:
raise ValueError, "The structure '%s' is not defined in module '%s'" % (name, package)
return pythoncom.GetRecordFromGuids(module.CLSID, module.MajorVersion, module.MinorVersion, module.LCID, struct_guid)
############################################
# The base of all makepy generated classes
############################################
class DispatchBaseClass:
def __init__(self, oobj=None):
if oobj is None:
oobj = pythoncom.new(self.CLSID)
elif type(self) == type(oobj): # An instance
try:
oobj = oobj._oleobj_.QueryInterface(self.CLSID, pythoncom.IID_IDispatch) # Must be a valid COM instance
except pythoncom.com_error, details:
import winerror
# Some stupid objects fail here, even tho it is _already_ IDispatch!!??
# Eg, Lotus notes.
# So just let it use the existing object if E_NOINTERFACE
if details[0] != winerror.E_NOINTERFACE:
raise
oobj = oobj._oleobj_
self.__dict__["_oleobj_"] = oobj # so we dont call __setattr__
# Provide a prettier name than the CLSID
def __repr__(self):
# Need to get the docstring for the module for this class.
try:
mod_doc = sys.modules[self.__class__.__module__].__doc__
if mod_doc:
mod_name = "win32com.gen_py." + mod_doc
else:
mod_name = sys.modules[self.__class__.__module__].__name__
except KeyError:
mod_name = "win32com.gen_py.unknown"
return "<%s.%s instance at 0x%s>" % (mod_name, self.__class__.__name__, id(self))
# Delegate comparison to the oleobjs, as they know how to do identity.
def __cmp__(self, other):
other = getattr(other, "_oleobj_", other)
return cmp(self._oleobj_, other)
def _ApplyTypes_(self, dispid, wFlags, retType, argTypes, user,
resultCLSID, *args):
return self._get_good_object_(
self._oleobj_.InvokeTypes(
dispid, 0, wFlags, retType, argTypes, *args),
user, resultCLSID)
def __getattr__(self, attr):
args=self._prop_map_get_.get(attr)
if args is None:
raise AttributeError, "'%s' object has no attribute '%s'" % (repr(self), attr)
return self._ApplyTypes_(*args)
def __setattr__(self, attr, value):
if self.__dict__.has_key(attr): self.__dict__[attr] = value; return
try:
args, defArgs=self._prop_map_put_[attr]
except KeyError:
raise AttributeError, "'%s' object has no attribute '%s'" % (repr(self), attr)
self._oleobj_.Invoke(*(args + (value,) + defArgs))
def _get_good_single_object_(self, obj, obUserName=None, resultCLSID=None):
return _get_good_single_object_(obj, obUserName, resultCLSID)
def _get_good_object_(self, obj, obUserName=None, resultCLSID=None):
return _get_good_object_(obj, obUserName, resultCLSID)
# XXX - These should be consolidated with dynamic.py versions.
def _get_good_single_object_(obj, obUserName=None, resultCLSID=None):
if _PyIDispatchType==type(obj):
return Dispatch(obj, obUserName, resultCLSID, UnicodeToString=NeedUnicodeConversions)
elif NeedUnicodeConversions and UnicodeType==type(obj):
return str(obj)
return obj
def _get_good_object_(obj, obUserName=None, resultCLSID=None):
if obj is None:
return None
elif type(obj)==TupleType:
obUserNameTuple = (obUserName,) * len(obj)
resultCLSIDTuple = (resultCLSID,) * len(obj)
return tuple(map(_get_good_object_, obj, obUserNameTuple, resultCLSIDTuple))
else:
return _get_good_single_object_(obj, obUserName, resultCLSID)
class CoClassBaseClass:
def __init__(self, oobj=None):
if oobj is None: oobj = pythoncom.new(self.CLSID)
self.__dict__["_dispobj_"] = self.default_interface(oobj)
def __repr__(self):
return "<win32com.gen_py.%s.%s>" % (__doc__, self.__class__.__name__)
def __getattr__(self, attr):
d=self.__dict__["_dispobj_"]
if d is not None: return getattr(d, attr)
raise AttributeError, attr
def __setattr__(self, attr, value):
if self.__dict__.has_key(attr): self.__dict__[attr] = value; return
try:
d=self.__dict__["_dispobj_"]
if d is not None:
d.__setattr__(attr, value)
return
except AttributeError:
pass
self.__dict__[attr] = value
| gpl-2.0 |
eaas-framework/virtualbox | src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/UPT/Parser/InfSourceSectionParser.py | 11 | 5471 | ## @file
# This file contains the parser for [Sources] sections in an INF file
#
# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
'''
InfSourceSectionParser
'''
##
# Import Modules
#
import Logger.Log as Logger
from Logger import StringTable as ST
from Logger.ToolError import FORMAT_INVALID
from Parser.InfParserMisc import InfExpandMacro
from Library import DataType as DT
from Library.Parsing import MacroParser
from Library.Misc import GetSplitValueList
from Object.Parser.InfCommonObject import InfLineCommentObject
from Parser.InfParserMisc import InfParserSectionRoot
class InfSourceSectionParser(InfParserSectionRoot):
## InfSourceParser
#
#
def InfSourceParser(self, SectionString, InfSectionObject, FileName):
SectionMacros = {}
ValueList = []
SourceList = []
        StillCommentFlag = False
HeaderComments = []
LineComment = None
SectionContent = ''
for Line in SectionString:
SrcLineContent = Line[0]
SrcLineNo = Line[1]
if SrcLineContent.strip() == '':
continue
#
# Found Header Comments
#
if SrcLineContent.strip().startswith(DT.TAB_COMMENT_SPLIT):
#
                # The previous line was a comment, and this line continues it.
#
                if StillCommentFlag:
HeaderComments.append(Line)
SectionContent += SrcLineContent + DT.END_OF_LINE
continue
#
                # First comment line encountered
#
else:
#
# Clear original data
#
HeaderComments = []
HeaderComments.append(Line)
                    StillCommentFlag = True
SectionContent += SrcLineContent + DT.END_OF_LINE
continue
else:
                StillCommentFlag = False
if len(HeaderComments) >= 1:
LineComment = InfLineCommentObject()
LineCommentContent = ''
for Item in HeaderComments:
LineCommentContent += Item[0] + DT.END_OF_LINE
LineComment.SetHeaderComments(LineCommentContent)
#
# Find Tail comment.
#
if SrcLineContent.find(DT.TAB_COMMENT_SPLIT) > -1:
TailComments = SrcLineContent[SrcLineContent.find(DT.TAB_COMMENT_SPLIT):]
SrcLineContent = SrcLineContent[:SrcLineContent.find(DT.TAB_COMMENT_SPLIT)]
                if LineComment is None:
LineComment = InfLineCommentObject()
LineComment.SetTailComments(TailComments)
#
# Find Macro
#
Name, Value = MacroParser((SrcLineContent, SrcLineNo),
FileName,
DT.MODEL_EFI_SOURCE_FILE,
self.FileLocalMacros)
            if Name is not None:
SectionMacros[Name] = Value
LineComment = None
HeaderComments = []
continue
#
# Replace with Local section Macro and [Defines] section Macro.
#
SrcLineContent = InfExpandMacro(SrcLineContent,
(FileName, SrcLineContent, SrcLineNo),
self.FileLocalMacros,
SectionMacros)
TokenList = GetSplitValueList(SrcLineContent, DT.TAB_VALUE_SPLIT, 4)
ValueList[0:len(TokenList)] = TokenList
#
# Store section content string after MACRO replaced.
#
SectionContent += SrcLineContent + DT.END_OF_LINE
SourceList.append((ValueList, LineComment,
(SrcLineContent, SrcLineNo, FileName)))
ValueList = []
LineComment = None
TailComments = ''
HeaderComments = []
continue
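        # Commentary added for this dump (not in the original source): every
        # SourceList entry built above is a 3-tuple of
        # (ValueList, LineComment, (line content, line number, file name)),
        # which SetSources() consumes below.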
#
# Current section archs
#
ArchList = []
for Item in self.LastSectionHeaderContent:
if Item[1] not in ArchList:
ArchList.append(Item[1])
InfSectionObject.SetSupArchList(Item[1])
InfSectionObject.SetAllContent(SectionContent)
if not InfSectionObject.SetSources(SourceList, Arch = ArchList):
Logger.Error('InfParser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_MODULE_SECTION_TYPE_ERROR % ("[Sources]"),
File=FileName,
Line=Item[3]) | gpl-2.0 |
dalegregory/odoo | addons/anonymization/anonymization.py | 103 | 28698 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
import os
import base64
try:
import cPickle as pickle
except ImportError:
import pickle
import random
import datetime
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval as eval
from itertools import groupby
from operator import itemgetter
FIELD_STATES = [('clear', 'Clear'), ('anonymized', 'Anonymized'), ('not_existing', 'Not Existing'), ('new', 'New')]
ANONYMIZATION_STATES = FIELD_STATES + [('unstable', 'Unstable')]
WIZARD_ANONYMIZATION_STATES = [('clear', 'Clear'), ('anonymized', 'Anonymized'), ('unstable', 'Unstable')]
ANONYMIZATION_HISTORY_STATE = [('started', 'Started'), ('done', 'Done'), ('in_exception', 'Exception occurred')]
ANONYMIZATION_DIRECTION = [('clear -> anonymized', 'clear -> anonymized'), ('anonymized -> clear', 'anonymized -> clear')]
def group(lst, cols):
if isinstance(cols, basestring):
cols = [cols]
return dict((k, [v for v in itr]) for k, itr in groupby(sorted(lst, key=itemgetter(*cols)), itemgetter(*cols)))
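# Illustrative example (added commentary, not part of the original module):
# group([{'m': 'a', 'q': 1}, {'m': 'a', 'q': 2}, {'m': 'b', 'q': 3}], 'm')
# returns {'a': [{'m': 'a', 'q': 1}, {'m': 'a', 'q': 2}], 'b': [{'m': 'b', 'q': 3}]},
# i.e. the rows bucketed by the requested column(s); it is used below to group
# anonymization migration fixes by (model_name, field_name).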
class ir_model_fields_anonymization(osv.osv):
_name = 'ir.model.fields.anonymization'
_rec_name = 'field_id'
_columns = {
'model_name': fields.char('Object Name', required=True),
'model_id': fields.many2one('ir.model', 'Object', ondelete='set null'),
'field_name': fields.char('Field Name', required=True),
'field_id': fields.many2one('ir.model.fields', 'Field', ondelete='set null'),
        'state': fields.selection(selection=FIELD_STATES, string='Status', required=True, readonly=True),
}
_sql_constraints = [
('model_id_field_id_uniq', 'unique (model_name, field_name)', _("You cannot have two fields with the same name on the same object!")),
]
def _get_global_state(self, cr, uid, context=None):
ids = self.search(cr, uid, [('state', '<>', 'not_existing')], context=context)
fields = self.browse(cr, uid, ids, context=context)
if not len(fields) or len(fields) == len([f for f in fields if f.state == 'clear']):
state = 'clear' # all fields are clear
elif len(fields) == len([f for f in fields if f.state == 'anonymized']):
state = 'anonymized' # all fields are anonymized
else:
state = 'unstable' # fields are mixed: this should be fixed
return state
def _check_write(self, cr, uid, context=None):
"""check that the field is created from the menu and not from an database update
otherwise the database update can crash:"""
if context is None:
context = {}
if context.get('manual'):
global_state = self._get_global_state(cr, uid, context=context)
if global_state == 'anonymized':
raise osv.except_osv('Error!', "The database is currently anonymized, you cannot create, modify or delete fields.")
elif global_state == 'unstable':
msg = _("The database anonymization is currently in an unstable state. Some fields are anonymized," + \
" while some fields are not anonymized. You should try to solve this problem before trying to create, write or delete fields.")
raise osv.except_osv('Error!', msg)
return True
def _get_model_and_field_ids(self, cr, uid, vals, context=None):
model_and_field_ids = (False, False)
if 'field_name' in vals and vals['field_name'] and 'model_name' in vals and vals['model_name']:
ir_model_fields_obj = self.pool.get('ir.model.fields')
ir_model_obj = self.pool.get('ir.model')
model_ids = ir_model_obj.search(cr, uid, [('model', '=', vals['model_name'])], context=context)
if model_ids:
field_ids = ir_model_fields_obj.search(cr, uid, [('name', '=', vals['field_name']), ('model_id', '=', model_ids[0])], context=context)
if field_ids:
field_id = field_ids[0]
model_and_field_ids = (model_ids[0], field_id)
return model_and_field_ids
def create(self, cr, uid, vals, context=None):
# check field state: all should be clear before we can add a new field to anonymize:
self._check_write(cr, uid, context=context)
global_state = self._get_global_state(cr, uid, context=context)
if 'field_name' in vals and vals['field_name'] and 'model_name' in vals and vals['model_name']:
vals['model_id'], vals['field_id'] = self._get_model_and_field_ids(cr, uid, vals, context=context)
# check not existing fields:
if not vals.get('field_id'):
vals['state'] = 'not_existing'
else:
vals['state'] = global_state
res = super(ir_model_fields_anonymization, self).create(cr, uid, vals, context=context)
return res
def write(self, cr, uid, ids, vals, context=None):
# check field state: all should be clear before we can modify a field:
if not (len(vals.keys()) == 1 and vals.get('state') == 'clear'):
self._check_write(cr, uid, context=context)
if 'field_name' in vals and vals['field_name'] and 'model_name' in vals and vals['model_name']:
vals['model_id'], vals['field_id'] = self._get_model_and_field_ids(cr, uid, vals, context=context)
# check not existing fields:
if 'field_id' in vals:
if not vals.get('field_id'):
vals['state'] = 'not_existing'
else:
global_state = self._get_global_state(cr, uid, context)
if global_state != 'unstable':
vals['state'] = global_state
res = super(ir_model_fields_anonymization, self).write(cr, uid, ids, vals, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
# check field state: all should be clear before we can unlink a field:
self._check_write(cr, uid, context=context)
res = super(ir_model_fields_anonymization, self).unlink(cr, uid, ids, context=context)
return res
def onchange_model_id(self, cr, uid, ids, model_id, context=None):
res = {'value': {
'field_name': False,
'field_id': False,
'model_name': False,
}}
if model_id:
ir_model_obj = self.pool.get('ir.model')
model_ids = ir_model_obj.search(cr, uid, [('id', '=', model_id)])
model_id = model_ids and model_ids[0] or None
model_name = model_id and ir_model_obj.browse(cr, uid, model_id).model or False
res['value']['model_name'] = model_name
return res
def onchange_model_name(self, cr, uid, ids, model_name, context=None):
res = {'value': {
'field_name': False,
'field_id': False,
'model_id': False,
}}
if model_name:
ir_model_obj = self.pool.get('ir.model')
model_ids = ir_model_obj.search(cr, uid, [('model', '=', model_name)])
model_id = model_ids and model_ids[0] or False
res['value']['model_id'] = model_id
return res
def onchange_field_name(self, cr, uid, ids, field_name, model_name):
res = {'value': {
'field_id': False,
}}
if field_name and model_name:
ir_model_fields_obj = self.pool.get('ir.model.fields')
field_ids = ir_model_fields_obj.search(cr, uid, [('name', '=', field_name), ('model', '=', model_name)])
field_id = field_ids and field_ids[0] or False
res['value']['field_id'] = field_id
return res
def onchange_field_id(self, cr, uid, ids, field_id, model_name):
res = {'value': {
'field_name': False,
}}
if field_id:
ir_model_fields_obj = self.pool.get('ir.model.fields')
field = ir_model_fields_obj.browse(cr, uid, field_id)
res['value']['field_name'] = field.name
return res
_defaults = {
'state': lambda *a: 'clear',
}
class ir_model_fields_anonymization_history(osv.osv):
_name = 'ir.model.fields.anonymization.history'
_order = "date desc"
_columns = {
'date': fields.datetime('Date', required=True, readonly=True),
'field_ids': fields.many2many('ir.model.fields.anonymization', 'anonymized_field_to_history_rel', 'field_id', 'history_id', 'Fields', readonly=True),
'state': fields.selection(selection=ANONYMIZATION_HISTORY_STATE, string='Status', required=True, readonly=True),
'direction': fields.selection(selection=ANONYMIZATION_DIRECTION, string='Direction', size=20, required=True, readonly=True),
'msg': fields.text('Message', readonly=True),
'filepath': fields.char(string='File path', readonly=True),
}
class ir_model_fields_anonymize_wizard(osv.osv_memory):
_name = 'ir.model.fields.anonymize.wizard'
def _get_state(self, cr, uid, ids, name, arg, context=None):
res = {}
        state = self._get_state_value(cr, uid, context=context)
for id in ids:
res[id] = state
return res
def _get_summary(self, cr, uid, ids, name, arg, context=None):
res = {}
summary = self._get_summary_value(cr, uid, context)
for id in ids:
res[id] = summary
return res
_columns = {
'name': fields.char(string='File Name'),
'summary': fields.function(_get_summary, type='text', string='Summary'),
'file_export': fields.binary(string='Export'),
        'file_import': fields.binary(string='Import', help="This is the file created by the anonymization process. It should have the '.pickle' extension."),
'state': fields.function(_get_state, string='Status', type='selection', selection=WIZARD_ANONYMIZATION_STATES, readonly=False),
'msg': fields.text(string='Message'),
}
def _get_state_value(self, cr, uid, context=None):
state = self.pool.get('ir.model.fields.anonymization')._get_global_state(cr, uid, context=context)
return state
def _get_summary_value(self, cr, uid, context=None):
summary = u''
anon_field_obj = self.pool.get('ir.model.fields.anonymization')
ir_model_fields_obj = self.pool.get('ir.model.fields')
anon_field_ids = anon_field_obj.search(cr, uid, [('state', '<>', 'not_existing')], context=context)
anon_fields = anon_field_obj.browse(cr, uid, anon_field_ids, context=context)
field_ids = [anon_field.field_id.id for anon_field in anon_fields if anon_field.field_id]
fields = ir_model_fields_obj.browse(cr, uid, field_ids, context=context)
fields_by_id = dict([(f.id, f) for f in fields])
for anon_field in anon_fields:
field = fields_by_id.get(anon_field.field_id.id)
values = {
'model_name': field.model_id.name,
'model_code': field.model_id.model,
'field_code': field.name,
'field_name': field.field_description,
'state': anon_field.state,
}
summary += u" * %(model_name)s (%(model_code)s) -> %(field_name)s (%(field_code)s): state: (%(state)s)\n" % values
return summary
def default_get(self, cr, uid, fields_list, context=None):
res = {}
res['name'] = '.pickle'
res['summary'] = self._get_summary_value(cr, uid, context)
res['state'] = self._get_state_value(cr, uid, context)
res['msg'] = _("""Before executing the anonymization process, you should make a backup of your database.""")
return res
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, *args, **kwargs):
state = self.pool.get('ir.model.fields.anonymization')._get_global_state(cr, uid, context=context)
if context is None:
context = {}
step = context.get('step', 'new_window')
res = super(ir_model_fields_anonymize_wizard, self).fields_view_get(cr, uid, view_id, view_type, context=context, *args, **kwargs)
eview = etree.fromstring(res['arch'])
placeholder = eview.xpath("group[@name='placeholder1']")
if len(placeholder):
placeholder = placeholder[0]
if step == 'new_window' and state == 'clear':
# clicked in the menu and the fields are not anonymized: warn the admin that backuping the db is very important
placeholder.addnext(etree.Element('field', {'name': 'msg', 'colspan': '4', 'nolabel': '1'}))
placeholder.addnext(etree.Element('newline'))
placeholder.addnext(etree.Element('label', {'string': 'Warning'}))
eview.remove(placeholder)
elif step == 'new_window' and state == 'anonymized':
# clicked in the menu and the fields are already anonymized
placeholder.addnext(etree.Element('newline'))
placeholder.addnext(etree.Element('field', {'name': 'file_import', 'required': "1"}))
placeholder.addnext(etree.Element('label', {'string': 'Anonymization file'}))
eview.remove(placeholder)
elif step == 'just_anonymized':
# we just ran the anonymization process, we need the file export field
placeholder.addnext(etree.Element('newline'))
placeholder.addnext(etree.Element('field', {'name': 'file_export'}))
# we need to remove the button:
buttons = eview.xpath("button")
for button in buttons:
eview.remove(button)
# and add a message:
placeholder.addnext(etree.Element('field', {'name': 'msg', 'colspan': '4', 'nolabel': '1'}))
placeholder.addnext(etree.Element('newline'))
placeholder.addnext(etree.Element('label', {'string': 'Result'}))
            # remove the placeholder:
eview.remove(placeholder)
elif step == 'just_desanonymized':
# we just reversed the anonymization process, we don't need any field
# we need to remove the button
buttons = eview.xpath("button")
for button in buttons:
eview.remove(button)
            # and add a message:
placeholder.addnext(etree.Element('field', {'name': 'msg', 'colspan': '4', 'nolabel': '1'}))
placeholder.addnext(etree.Element('newline'))
placeholder.addnext(etree.Element('label', {'string': 'Result'}))
            # remove the placeholder:
eview.remove(placeholder)
else:
msg = _("The database anonymization is currently in an unstable state. Some fields are anonymized," + \
" while some fields are not anonymized. You should try to solve this problem before trying to do anything else.")
raise osv.except_osv('Error!', msg)
res['arch'] = etree.tostring(eview)
return res
def _raise_after_history_update(self, cr, uid, history_id, error_type, error_msg):
self.pool.get('ir.model.fields.anonymization.history').write(cr, uid, history_id, {
'state': 'in_exception',
'msg': error_msg,
})
raise osv.except_osv(error_type, error_msg)
def anonymize_database(self, cr, uid, ids, context=None):
"""Sets the 'anonymized' state to defined fields"""
# create a new history record:
anonymization_history_model = self.pool.get('ir.model.fields.anonymization.history')
vals = {
'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'state': 'started',
'direction': 'clear -> anonymized',
}
history_id = anonymization_history_model.create(cr, uid, vals)
# check that all the defined fields are in the 'clear' state
state = self.pool.get('ir.model.fields.anonymization')._get_global_state(cr, uid, context=context)
if state == 'anonymized':
self._raise_after_history_update(cr, uid, history_id, _('Error !'), _("The database is currently anonymized, you cannot anonymize it again."))
elif state == 'unstable':
msg = _("The database anonymization is currently in an unstable state. Some fields are anonymized," + \
" while some fields are not anonymized. You should try to solve this problem before trying to do anything.")
self._raise_after_history_update(cr, uid, history_id, 'Error !', msg)
# do the anonymization:
dirpath = os.environ.get('HOME') or os.getcwd()
rel_filepath = 'field_anonymization_%s_%s.pickle' % (cr.dbname, history_id)
abs_filepath = os.path.abspath(os.path.join(dirpath, rel_filepath))
ir_model_fields_anonymization_model = self.pool.get('ir.model.fields.anonymization')
field_ids = ir_model_fields_anonymization_model.search(cr, uid, [('state', '<>', 'not_existing')], context=context)
fields = ir_model_fields_anonymization_model.browse(cr, uid, field_ids, context=context)
if not fields:
msg = "No fields are going to be anonymized."
self._raise_after_history_update(cr, uid, history_id, 'Error !', msg)
data = []
for field in fields:
model_name = field.model_id.model
field_name = field.field_id.name
field_type = field.field_id.ttype
table_name = self.pool[model_name]._table
# get the current value
sql = "select id, %s from %s" % (field_name, table_name)
cr.execute(sql)
records = cr.dictfetchall()
for record in records:
data.append({"model_id": model_name, "field_id": field_name, "id": record['id'], "value": record[field_name]})
# anonymize the value:
anonymized_value = None
sid = str(record['id'])
if field_type == 'char':
anonymized_value = 'xxx'+sid
elif field_type == 'selection':
anonymized_value = 'xxx'+sid
elif field_type == 'text':
anonymized_value = 'xxx'+sid
elif field_type == 'boolean':
anonymized_value = random.choice([True, False])
elif field_type == 'date':
anonymized_value = '2011-11-11'
elif field_type == 'datetime':
anonymized_value = '2011-11-11 11:11:11'
elif field_type == 'float':
anonymized_value = 0.0
elif field_type == 'integer':
anonymized_value = 0
elif field_type in ['binary', 'many2many', 'many2one', 'one2many', 'reference']: # cannot anonymize these kind of fields
msg = _("Cannot anonymize fields of these types: binary, many2many, many2one, one2many, reference.")
self._raise_after_history_update(cr, uid, history_id, 'Error !', msg)
if anonymized_value is None:
                    self._raise_after_history_update(cr, uid, history_id, _('Error !'), _("Anonymized value is None. This cannot happen."))
sql = "update %(table)s set %(field)s = %%(anonymized_value)s where id = %%(id)s" % {
'table': table_name,
'field': field_name,
}
cr.execute(sql, {
'anonymized_value': anonymized_value,
'id': record['id']
})
# save pickle:
        fn = open(abs_filepath, 'wb')
        pickle.dump(data, fn, pickle.HIGHEST_PROTOCOL)
        fn.close()
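        # Note (added commentary): this pickle is the only record of the
        # original values; reverse_anonymize_database() below restores the
        # database by unpickling the uploaded copy of this export, which is
        # why the wizard insists on keeping the file in a safe place.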
# update the anonymization fields:
values = {
'state': 'anonymized',
}
ir_model_fields_anonymization_model.write(cr, uid, field_ids, values, context=context)
# add a result message in the wizard:
msgs = ["Anonymization successful.",
"",
"Donot forget to save the resulting file to a safe place because you will not be able to revert the anonymization without this file.",
"",
"This file is also stored in the %s directory. The absolute file path is: %s.",
]
msg = '\n'.join(msgs) % (dirpath, abs_filepath)
        fn = open(abs_filepath, 'rb')
self.write(cr, uid, ids, {
'msg': msg,
'file_export': base64.encodestring(fn.read()),
})
fn.close()
# update the history record:
anonymization_history_model.write(cr, uid, history_id, {
'field_ids': [[6, 0, field_ids]],
'msg': msg,
'filepath': abs_filepath,
'state': 'done',
})
# handle the view:
view_id = self._id_get(cr, uid, 'ir.ui.view', 'view_ir_model_fields_anonymize_wizard_form', 'anonymization')
return {
'res_id': ids[0],
'view_id': [view_id],
'view_type': 'form',
"view_mode": 'form',
'res_model': 'ir.model.fields.anonymize.wizard',
'type': 'ir.actions.act_window',
'context': {'step': 'just_anonymized'},
'target':'new',
}
def reverse_anonymize_database(self, cr, uid, ids, context=None):
"""Set the 'clear' state to defined fields"""
ir_model_fields_anonymization_model = self.pool.get('ir.model.fields.anonymization')
anonymization_history_model = self.pool.get('ir.model.fields.anonymization.history')
# create a new history record:
vals = {
'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'state': 'started',
'direction': 'anonymized -> clear',
}
history_id = anonymization_history_model.create(cr, uid, vals)
# check that all the defined fields are in the 'anonymized' state
state = ir_model_fields_anonymization_model._get_global_state(cr, uid, context=context)
if state == 'clear':
            raise osv.except_osv('Error!', "The database is not currently anonymized, you cannot reverse the anonymization.")
elif state == 'unstable':
msg = _("The database anonymization is currently in an unstable state. Some fields are anonymized," + \
" while some fields are not anonymized. You should try to solve this problem before trying to do anything.")
raise osv.except_osv('Error!', msg)
wizards = self.browse(cr, uid, ids, context=context)
for wizard in wizards:
if not wizard.file_import:
msg = _("It is not possible to reverse the anonymization process without supplying the anonymization export file.")
self._raise_after_history_update(cr, uid, history_id, 'Error !', msg)
# reverse the anonymization:
# load the pickle file content into a data structure:
data = pickle.loads(base64.decodestring(wizard.file_import))
migration_fix_obj = self.pool.get('ir.model.fields.anonymization.migration.fix')
fix_ids = migration_fix_obj.search(cr, uid, [('target_version', '=', '8.0')])
fixes = migration_fix_obj.read(cr, uid, fix_ids, ['model_name', 'field_name', 'query', 'query_type', 'sequence'])
fixes = group(fixes, ('model_name', 'field_name'))
for line in data:
queries = []
table_name = self.pool[line['model_id']]._table if line['model_id'] in self.pool else None
# check if custom sql exists:
key = (line['model_id'], line['field_id'])
custom_updates = fixes.get(key)
if custom_updates:
custom_updates.sort(key=itemgetter('sequence'))
queries = [(record['query'], record['query_type']) for record in custom_updates if record['query_type']]
elif table_name:
queries = [("update %(table)s set %(field)s = %%(value)s where id = %%(id)s" % {
'table': table_name,
'field': line['field_id'],
}, 'sql')]
for query in queries:
if query[1] == 'sql':
sql = query[0]
cr.execute(sql, {
'value': line['value'],
'id': line['id']
})
elif query[1] == 'python':
raw_code = query[0]
code = raw_code % line
eval(code)
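                        # Note (added commentary): `eval` here is actually
                        # openerp's safe_eval (aliased in the imports at the
                        # top of this file), so the stored python fix runs in
                        # a restricted evaluation context, not the builtin eval.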
else:
raise Exception("Unknown query type '%s'. Valid types are: sql, python." % (query['query_type'], ))
# update the anonymization fields:
ir_model_fields_anonymization_model = self.pool.get('ir.model.fields.anonymization')
field_ids = ir_model_fields_anonymization_model.search(cr, uid, [('state', '<>', 'not_existing')], context=context)
values = {
'state': 'clear',
}
ir_model_fields_anonymization_model.write(cr, uid, field_ids, values, context=context)
# add a result message in the wizard:
msg = '\n'.join(["Successfully reversed the anonymization.",
"",
])
self.write(cr, uid, ids, {'msg': msg})
# update the history record:
anonymization_history_model.write(cr, uid, history_id, {
'field_ids': [[6, 0, field_ids]],
'msg': msg,
'filepath': False,
'state': 'done',
})
# handle the view:
view_id = self._id_get(cr, uid, 'ir.ui.view', 'view_ir_model_fields_anonymize_wizard_form', 'anonymization')
return {
'res_id': ids[0],
'view_id': [view_id],
'view_type': 'form',
"view_mode": 'form',
'res_model': 'ir.model.fields.anonymize.wizard',
'type': 'ir.actions.act_window',
'context': {'step': 'just_desanonymized'},
'target':'new',
}
def _id_get(self, cr, uid, model, id_str, mod):
if '.' in id_str:
mod, id_str = id_str.split('.')
try:
idn = self.pool.get('ir.model.data')._get_id(cr, uid, mod, id_str)
res = int(self.pool.get('ir.model.data').read(cr, uid, [idn], ['res_id'])[0]['res_id'])
        except Exception:
res = None
return res
class ir_model_fields_anonymization_migration_fix(osv.osv):
_name = 'ir.model.fields.anonymization.migration.fix'
_order = "sequence"
_columns = {
'target_version': fields.char('Target Version'),
'model_name': fields.char('Model'),
'field_name': fields.char('Field'),
'query': fields.text('Query'),
'query_type': fields.selection(string='Query', selection=[('sql', 'sql'), ('python', 'python')]),
'sequence': fields.integer('Sequence'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
proversity-org/edx-platform | cms/djangoapps/contentstore/tests/tests.py | 3 | 15848 | """
This test file will test registration, login, activation, and session activity timeouts
"""
from __future__ import print_function
import datetime
import time
import mock
import pytest
from ddt import data, ddt, unpack
from django.conf import settings
from django.contrib.auth.models import User
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import override_settings
from freezegun import freeze_time
from pytz import UTC
from six.moves import xrange
from contentstore.models import PushNotificationConfig
from contentstore.tests.test_course_settings import CourseTestCase
from contentstore.tests.utils import AjaxEnabledTestClient, parse_json, registration, user
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class ContentStoreTestCase(ModuleStoreTestCase):
def _login(self, email, password):
"""
Login. View should always return 200. The success/fail is in the
returned json
"""
resp = self.client.post(
reverse('login_post'),
{'email': email, 'password': password}
)
self.assertEqual(resp.status_code, 200)
return resp
def login(self, email, password):
"""Login, check that it worked."""
resp = self._login(email, password)
data = parse_json(resp)
self.assertTrue(data['success'])
return resp
def _create_account(self, username, email, password):
"""Try to create an account. No error checking"""
resp = self.client.post('/create_account', {
'username': username,
'email': email,
'password': password,
'location': 'home',
'language': 'Franglish',
'name': 'Fred Weasley',
'terms_of_service': 'true',
'honor_code': 'true',
})
return resp
def create_account(self, username, email, password):
"""Create the account and check that it worked"""
resp = self._create_account(username, email, password)
self.assertEqual(resp.status_code, 200)
data = parse_json(resp)
self.assertEqual(data['success'], True)
# Check both that the user is created, and inactive
self.assertFalse(user(email).is_active)
return resp
def _activate_user(self, email):
"""Look up the activation key for the user, then hit the activate view.
No error checking"""
activation_key = registration(email).activation_key
# and now we try to activate
resp = self.client.get(reverse('activate', kwargs={'key': activation_key}))
return resp
def activate_user(self, email):
resp = self._activate_user(email)
self.assertEqual(resp.status_code, 200)
# Now make sure that the user is now actually activated
self.assertTrue(user(email).is_active)
@pytest.mark.django_db
def test_create_account_email_already_exists(django_db_use_migrations):
"""
This is tricky. Django's user model doesn't have a constraint on
unique email addresses, but we *add* that constraint during the
migration process:
see common/djangoapps/student/migrations/0004_add_email_index.py
The behavior we *want* is for this account creation request
to fail, due to this uniqueness constraint, but the request will
succeed if the migrations have not run.
django_db_use_migration is a pytest fixture that tells us if
migrations have been run. Since pytest fixtures don't play nice
with TestCase objects this is a function and doesn't get to use
assertRaises.
"""
if django_db_use_migrations:
email = 'a@b.com'
pw = 'xyz'
username = 'testuser'
User.objects.create_user(username, email, pw)
# Hack to use the _create_account shortcut
case = ContentStoreTestCase()
resp = case._create_account("abcdef", email, "password") # pylint: disable=protected-access
assert resp.status_code == 400, 'Migrations are run, but creating an account with duplicate email succeeded!'
class AuthTestCase(ContentStoreTestCase):
"""Check that various permissions-related things work"""
CREATE_USER = False
ENABLED_CACHES = ['default', 'mongo_metadata_inheritance', 'loc_cache']
def setUp(self):
super(AuthTestCase, self).setUp()
self.email = 'a@b.com'
self.pw = 'xyz'
self.username = 'testuser'
self.client = AjaxEnabledTestClient()
# clear the cache so ratelimiting won't affect these tests
cache.clear()
def check_page_get(self, url, expected):
resp = self.client.get_html(url)
self.assertEqual(resp.status_code, expected)
return resp
def test_public_pages_load(self):
"""Make sure pages that don't require login load without error."""
pages = (
reverse('login'),
reverse('signup'),
)
for page in pages:
print("Checking '{0}'".format(page))
self.check_page_get(page, 200)
def test_create_account_errors(self):
# No post data -- should fail
resp = self.client.post('/create_account', {})
self.assertEqual(resp.status_code, 400)
data = parse_json(resp)
self.assertEqual(data['success'], False)
def test_create_account(self):
self.create_account(self.username, self.email, self.pw)
self.activate_user(self.email)
def test_create_account_username_already_exists(self):
User.objects.create_user(self.username, self.email, self.pw)
resp = self._create_account(self.username, "abc@def.com", "password")
# we have a constraint on unique usernames, so this should fail
self.assertEqual(resp.status_code, 400)
def test_create_account_pw_already_exists(self):
User.objects.create_user(self.username, self.email, self.pw)
resp = self._create_account("abcdef", "abc@def.com", self.pw)
# we can have two users with the same password, so this should succeed
self.assertEqual(resp.status_code, 200)
def test_login(self):
self.create_account(self.username, self.email, self.pw)
# Not activated yet. Login should fail.
resp = self._login(self.email, self.pw)
data = parse_json(resp)
self.assertFalse(data['success'])
self.activate_user(self.email)
# Now login should work
self.login(self.email, self.pw)
def test_login_ratelimited(self):
        # try logging in 30 times, the default limit on the number of failed
# login attempts in one 5 minute period before the rate gets limited
for i in xrange(30):
resp = self._login(self.email, 'wrong_password{0}'.format(i))
self.assertEqual(resp.status_code, 200)
resp = self._login(self.email, 'wrong_password')
self.assertEqual(resp.status_code, 200)
data = parse_json(resp)
self.assertFalse(data['success'])
self.assertIn('Too many failed login attempts.', data['value'])
@override_settings(MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED=3)
@override_settings(MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS=2)
def test_excessive_login_failures(self):
# try logging in 3 times, the account should get locked for 3 seconds
# note we want to keep the lockout time short, so we don't slow down the tests
with mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_MAX_FAILED_LOGIN_ATTEMPTS': True}):
self.create_account(self.username, self.email, self.pw)
self.activate_user(self.email)
for i in xrange(3):
resp = self._login(self.email, 'wrong_password{0}'.format(i))
self.assertEqual(resp.status_code, 200)
data = parse_json(resp)
self.assertFalse(data['success'])
self.assertIn(
'Email or password is incorrect.',
data['value']
)
# now the account should be locked
resp = self._login(self.email, 'wrong_password')
self.assertEqual(resp.status_code, 200)
data = parse_json(resp)
self.assertFalse(data['success'])
self.assertIn(
'This account has been temporarily locked due to excessive login failures. Try again later.',
data['value']
)
with freeze_time('2100-01-01'):
self.login(self.email, self.pw)
# make sure the failed attempt counter gets reset on successful login
resp = self._login(self.email, 'wrong_password')
self.assertEqual(resp.status_code, 200)
data = parse_json(resp)
self.assertFalse(data['success'])
# account should not be locked out after just one attempt
self.login(self.email, self.pw)
# do one more login when there is no bad login counter row at all in the database to
# test the "ObjectNotFound" case
self.login(self.email, self.pw)
def test_login_link_on_activation_age(self):
self.create_account(self.username, self.email, self.pw)
# we want to test the rendering of the activation page when the user isn't logged in
self.client.logout()
resp = self._activate_user(self.email)
self.assertEqual(resp.status_code, 200)
        # check that the HTML has links to the right login page. Note that this is merely a content
        # check and thus could be fragile should the wording change on this page
expected = 'You can now <a href="' + reverse('login') + '">sign in</a>.'
self.assertIn(expected, resp.content.decode('utf-8'))
def test_private_pages_auth(self):
"""Make sure pages that do require login work."""
auth_pages = (
'/home/',
)
# These are pages that should just load when the user is logged in
# (no data needed)
simple_auth_pages = (
'/home/',
)
# need an activated user
self.test_create_account()
# Create a new session
self.client = AjaxEnabledTestClient()
# Not logged in. Should redirect to login.
print('Not logged in')
for page in auth_pages:
print("Checking '{0}'".format(page))
self.check_page_get(page, expected=302)
# Logged in should work.
self.login(self.email, self.pw)
print('Logged in')
for page in simple_auth_pages:
print("Checking '{0}'".format(page))
self.check_page_get(page, expected=200)
def test_index_auth(self):
# not logged in. Should return a redirect.
resp = self.client.get_html('/home/')
self.assertEqual(resp.status_code, 302)
# Logged in should work.
@override_settings(SESSION_INACTIVITY_TIMEOUT_IN_SECONDS=1)
def test_inactive_session_timeout(self):
"""
Verify that an inactive session times out and redirects to the
login page
"""
self.create_account(self.username, self.email, self.pw)
self.activate_user(self.email)
self.login(self.email, self.pw)
# make sure we can access courseware immediately
course_url = '/home/'
resp = self.client.get_html(course_url)
self.assertEquals(resp.status_code, 200)
# then wait a bit and see if we get timed out
time.sleep(2)
resp = self.client.get_html(course_url)
# re-request, and we should get a redirect to login page
self.assertRedirects(resp, settings.LOGIN_REDIRECT_URL + '?next=/home/')
@mock.patch.dict(settings.FEATURES, {"ALLOW_PUBLIC_ACCOUNT_CREATION": False})
def test_signup_button_index_page(self):
"""
Navigate to the home page and check the Sign Up button is hidden when ALLOW_PUBLIC_ACCOUNT_CREATION flag
is turned off
"""
response = self.client.get(reverse('homepage'))
self.assertNotIn('<a class="action action-signup" href="/signup">Sign Up</a>', response.content)
@mock.patch.dict(settings.FEATURES, {"ALLOW_PUBLIC_ACCOUNT_CREATION": False})
def test_signup_button_login_page(self):
"""
Navigate to the login page and check the Sign Up button is hidden when ALLOW_PUBLIC_ACCOUNT_CREATION flag
is turned off
"""
response = self.client.get(reverse('login'))
self.assertNotIn('<a class="action action-signup" href="/signup">Sign Up</a>', response.content)
@mock.patch.dict(settings.FEATURES, {"ALLOW_PUBLIC_ACCOUNT_CREATION": False})
def test_signup_link_login_page(self):
"""
Navigate to the login page and check the Sign Up link is hidden when ALLOW_PUBLIC_ACCOUNT_CREATION flag
is turned off
"""
response = self.client.get(reverse('login'))
self.assertNotIn('<a href="/signup" class="action action-signin">Don't have a Studio Account? Sign up!</a>',
response.content)
class ForumTestCase(CourseTestCase):
def setUp(self):
""" Creates the test course. """
super(ForumTestCase, self).setUp()
self.course = CourseFactory.create(org='testX', number='727', display_name='Forum Course')
def test_blackouts(self):
now = datetime.datetime.now(UTC)
times1 = [
(now - datetime.timedelta(days=14), now - datetime.timedelta(days=11)),
(now + datetime.timedelta(days=24), now + datetime.timedelta(days=30))
]
self.course.discussion_blackouts = [(t.isoformat(), t2.isoformat()) for t, t2 in times1]
self.assertTrue(self.course.forum_posts_allowed)
times2 = [
(now - datetime.timedelta(days=14), now + datetime.timedelta(days=2)),
(now + datetime.timedelta(days=24), now + datetime.timedelta(days=30))
]
self.course.discussion_blackouts = [(t.isoformat(), t2.isoformat()) for t, t2 in times2]
self.assertFalse(self.course.forum_posts_allowed)
        # if the user gives an empty blackout date, forum_posts_allowed should return True
self.course.discussion_blackouts = [[]]
self.assertTrue(self.course.forum_posts_allowed)
@ddt
class CourseKeyVerificationTestCase(CourseTestCase):
def setUp(self):
"""
Create test course.
"""
super(CourseKeyVerificationTestCase, self).setUp()
self.course = CourseFactory.create(org='edX', number='test_course_key', display_name='Test Course')
@data(('edX/test_course_key/Test_Course', 200), ('garbage:edX+test_course_key+Test_Course', 404))
@unpack
def test_course_key_decorator(self, course_key, status_code):
"""
Tests for the ensure_valid_course_key decorator.
"""
url = '/import/{course_key}'.format(course_key=course_key)
resp = self.client.get_html(url)
self.assertEqual(resp.status_code, status_code)
url = '/import_status/{course_key}/{filename}'.format(
course_key=course_key,
filename='xyz.tar.gz'
)
resp = self.client.get_html(url)
self.assertEqual(resp.status_code, status_code)
class PushNotificationConfigTestCase(TestCase):
"""
Tests PushNotificationConfig.
"""
def test_notifications_defaults(self):
self.assertFalse(PushNotificationConfig.is_enabled())
def test_notifications_enabled(self):
PushNotificationConfig(enabled=True).save()
self.assertTrue(PushNotificationConfig.is_enabled())
| agpl-3.0 |
cloudfoundry/php-buildpack-legacy | builds/runtimes/python-2.7.6/lib/python2.7/test/test_ast.py | 45 | 24761 | import sys, itertools, unittest
from test import test_support
import ast
def to_tuple(t):
if t is None or isinstance(t, (basestring, int, long, complex)):
return t
elif isinstance(t, list):
return [to_tuple(e) for e in t]
result = [t.__class__.__name__]
if hasattr(t, 'lineno') and hasattr(t, 'col_offset'):
result.append((t.lineno, t.col_offset))
if t._fields is None:
return tuple(result)
for f in t._fields:
result.append(to_tuple(getattr(t, f)))
return tuple(result)
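# Illustrative note (added commentary, not part of the original test file):
# for the source "v = 1", to_tuple(compile(source, "?", "exec", ast.PyCF_ONLY_AST))
# flattens the AST into nested tuples such as
# ('Module', [('Assign', (1, 0), [('Name', (1, 0), 'v', ('Store',))], ('Num', (1, 4), 1))]);
# the *_results lists at the bottom of this file were generated this way via
# main() with the -g flag.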
# These tests are compiled through "exec"
# There should be at least one test per statement
exec_tests = [
# None
"None",
# FunctionDef
"def f(): pass",
# FunctionDef with arg
"def f(a): pass",
# FunctionDef with arg and default value
"def f(a=0): pass",
# FunctionDef with varargs
"def f(*args): pass",
# FunctionDef with kwargs
"def f(**kwargs): pass",
# FunctionDef with all kind of args
"def f(a, b=1, c=None, d=[], e={}, *args, **kwargs): pass",
# ClassDef
"class C:pass",
# ClassDef, new style class
"class C(object): pass",
# Return
"def f():return 1",
# Delete
"del v",
# Assign
"v = 1",
# AugAssign
"v += 1",
# Print
"print >>f, 1, ",
# For
"for v in v:pass",
# While
"while v:pass",
# If
"if v:pass",
# Raise
"raise Exception, 'string'",
# TryExcept
"try:\n pass\nexcept Exception:\n pass",
# TryFinally
"try:\n pass\nfinally:\n pass",
# Assert
"assert v",
# Import
"import sys",
# ImportFrom
"from sys import v",
# Exec
"exec 'v'",
# Global
"global v",
# Expr
"1",
# Pass,
"pass",
# Break
"break",
# Continue
"continue",
# for statements with naked tuples (see http://bugs.python.org/issue6704)
"for a,b in c: pass",
"[(a,b) for a,b in c]",
"((a,b) for a,b in c)",
"((a,b) for (a,b) in c)",
# Multiline generator expression (test for .lineno & .col_offset)
"""(
(
Aa
,
Bb
)
for
Aa
,
Bb in Cc
)""",
# dictcomp
"{a : b for w in x for m in p if g}",
# dictcomp with naked tuple
"{a : b for v,w in x}",
# setcomp
"{r for l in x if g}",
# setcomp with naked tuple
"{r for l,m in x}",
]
# These are compiled through "single"
# because of overlap with "eval", it just tests what
# can't be tested with "eval"
single_tests = [
"1+2"
]
# These are compiled through "eval"
# It should test all expressions
eval_tests = [
# None
"None",
# BoolOp
"a and b",
# BinOp
"a + b",
# UnaryOp
"not v",
# Lambda
"lambda:None",
# Dict
"{ 1:2 }",
# Empty dict
"{}",
# Set
"{None,}",
# Multiline dict (test for .lineno & .col_offset)
"""{
1
:
2
}""",
# ListComp
"[a for b in c if d]",
# GeneratorExp
"(a for b in c if d)",
# Yield - yield expressions can't work outside a function
#
# Compare
"1 < 2 < 3",
# Call
"f(1,2,c=3,*d,**e)",
# Repr
"`v`",
# Num
"10L",
# Str
"'string'",
# Attribute
"a.b",
# Subscript
"a[b:c]",
# Name
"v",
# List
"[1,2,3]",
# Empty list
"[]",
# Tuple
"1,2,3",
# Tuple
"(1,2,3)",
# Empty tuple
"()",
# Combination
"a.b.c.d(a.b[1:2])",
]
# TODO: expr_context, slice, boolop, operator, unaryop, cmpop, comprehension
# excepthandler, arguments, keywords, alias
class AST_Tests(unittest.TestCase):
def _assertTrueorder(self, ast_node, parent_pos):
if not isinstance(ast_node, ast.AST) or ast_node._fields is None:
return
if isinstance(ast_node, (ast.expr, ast.stmt, ast.excepthandler)):
node_pos = (ast_node.lineno, ast_node.col_offset)
self.assertTrue(node_pos >= parent_pos)
parent_pos = (ast_node.lineno, ast_node.col_offset)
for name in ast_node._fields:
value = getattr(ast_node, name)
if isinstance(value, list):
for child in value:
self._assertTrueorder(child, parent_pos)
elif value is not None:
self._assertTrueorder(value, parent_pos)
def test_AST_objects(self):
x = ast.AST()
self.assertEqual(x._fields, ())
with self.assertRaises(AttributeError):
x.vararg
with self.assertRaises(AttributeError):
x.foobar = 21
with self.assertRaises(AttributeError):
ast.AST(lineno=2)
with self.assertRaises(TypeError):
# "_ast.AST constructor takes 0 positional arguments"
ast.AST(2)
def test_snippets(self):
for input, output, kind in ((exec_tests, exec_results, "exec"),
(single_tests, single_results, "single"),
(eval_tests, eval_results, "eval")):
for i, o in itertools.izip(input, output):
ast_tree = compile(i, "?", kind, ast.PyCF_ONLY_AST)
self.assertEqual(to_tuple(ast_tree), o)
self._assertTrueorder(ast_tree, (0, 0))
def test_slice(self):
slc = ast.parse("x[::]").body[0].value.slice
self.assertIsNone(slc.upper)
self.assertIsNone(slc.lower)
self.assertIsInstance(slc.step, ast.Name)
self.assertEqual(slc.step.id, "None")
def test_from_import(self):
im = ast.parse("from . import y").body[0]
self.assertIsNone(im.module)
def test_non_interned_future_from_ast(self):
mod = ast.parse("from __future__ import division")
self.assertIsInstance(mod.body[0], ast.ImportFrom)
mod.body[0].module = " __future__ ".strip()
compile(mod, "<test>", "exec")
def test_base_classes(self):
self.assertTrue(issubclass(ast.For, ast.stmt))
self.assertTrue(issubclass(ast.Name, ast.expr))
self.assertTrue(issubclass(ast.stmt, ast.AST))
self.assertTrue(issubclass(ast.expr, ast.AST))
self.assertTrue(issubclass(ast.comprehension, ast.AST))
self.assertTrue(issubclass(ast.Gt, ast.AST))
def test_field_attr_existence(self):
for name, item in ast.__dict__.iteritems():
if isinstance(item, type) and name != 'AST' and name[0].isupper():
x = item()
if isinstance(x, ast.AST):
self.assertEqual(type(x._fields), tuple)
def test_arguments(self):
x = ast.arguments()
self.assertEqual(x._fields, ('args', 'vararg', 'kwarg', 'defaults'))
with self.assertRaises(AttributeError):
x.vararg
x = ast.arguments(1, 2, 3, 4)
self.assertEqual(x.vararg, 2)
def test_field_attr_writable(self):
x = ast.Num()
# We can assign to _fields
x._fields = 666
self.assertEqual(x._fields, 666)
def test_classattrs(self):
x = ast.Num()
self.assertEqual(x._fields, ('n',))
with self.assertRaises(AttributeError):
x.n
x = ast.Num(42)
self.assertEqual(x.n, 42)
with self.assertRaises(AttributeError):
x.lineno
with self.assertRaises(AttributeError):
x.foobar
x = ast.Num(lineno=2)
self.assertEqual(x.lineno, 2)
x = ast.Num(42, lineno=0)
self.assertEqual(x.lineno, 0)
self.assertEqual(x._fields, ('n',))
self.assertEqual(x.n, 42)
self.assertRaises(TypeError, ast.Num, 1, 2)
self.assertRaises(TypeError, ast.Num, 1, 2, lineno=0)
def test_module(self):
body = [ast.Num(42)]
x = ast.Module(body)
self.assertEqual(x.body, body)
def test_nodeclasses(self):
        # Zero-argument constructor explicitly allowed
x = ast.BinOp()
self.assertEqual(x._fields, ('left', 'op', 'right'))
# Random attribute allowed too
x.foobarbaz = 5
self.assertEqual(x.foobarbaz, 5)
n1 = ast.Num(1)
n3 = ast.Num(3)
addop = ast.Add()
x = ast.BinOp(n1, addop, n3)
self.assertEqual(x.left, n1)
self.assertEqual(x.op, addop)
self.assertEqual(x.right, n3)
x = ast.BinOp(1, 2, 3)
self.assertEqual(x.left, 1)
self.assertEqual(x.op, 2)
self.assertEqual(x.right, 3)
x = ast.BinOp(1, 2, 3, lineno=0)
self.assertEqual(x.left, 1)
self.assertEqual(x.op, 2)
self.assertEqual(x.right, 3)
self.assertEqual(x.lineno, 0)
# node raises exception when not given enough arguments
self.assertRaises(TypeError, ast.BinOp, 1, 2)
# node raises exception when given too many arguments
self.assertRaises(TypeError, ast.BinOp, 1, 2, 3, 4)
# node raises exception when not given enough arguments
self.assertRaises(TypeError, ast.BinOp, 1, 2, lineno=0)
# node raises exception when given too many arguments
self.assertRaises(TypeError, ast.BinOp, 1, 2, 3, 4, lineno=0)
# can set attributes through kwargs too
x = ast.BinOp(left=1, op=2, right=3, lineno=0)
self.assertEqual(x.left, 1)
self.assertEqual(x.op, 2)
self.assertEqual(x.right, 3)
self.assertEqual(x.lineno, 0)
# Random kwargs also allowed
x = ast.BinOp(1, 2, 3, foobarbaz=42)
self.assertEqual(x.foobarbaz, 42)
def test_no_fields(self):
# this used to fail because Sub._fields was None
x = ast.Sub()
self.assertEqual(x._fields, ())
def test_pickling(self):
import pickle
mods = [pickle]
try:
import cPickle
mods.append(cPickle)
except ImportError:
pass
protocols = [0, 1, 2]
for mod in mods:
for protocol in protocols:
for ast in (compile(i, "?", "exec", 0x400) for i in exec_tests):
ast2 = mod.loads(mod.dumps(ast, protocol))
self.assertEqual(to_tuple(ast2), to_tuple(ast))
    def test_invalid_identifier(self):
m = ast.Module([ast.Expr(ast.Name(u"x", ast.Load()))])
ast.fix_missing_locations(m)
with self.assertRaises(TypeError) as cm:
compile(m, "<test>", "exec")
self.assertIn("identifier must be of type str", str(cm.exception))
def test_invalid_string(self):
m = ast.Module([ast.Expr(ast.Str(43))])
ast.fix_missing_locations(m)
with self.assertRaises(TypeError) as cm:
compile(m, "<test>", "exec")
self.assertIn("string must be of type str or uni", str(cm.exception))
class ASTHelpers_Test(unittest.TestCase):
def test_parse(self):
a = ast.parse('foo(1 + 1)')
b = compile('foo(1 + 1)', '<unknown>', 'exec', ast.PyCF_ONLY_AST)
self.assertEqual(ast.dump(a), ast.dump(b))
def test_dump(self):
node = ast.parse('spam(eggs, "and cheese")')
self.assertEqual(ast.dump(node),
"Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load()), "
"args=[Name(id='eggs', ctx=Load()), Str(s='and cheese')], "
"keywords=[], starargs=None, kwargs=None))])"
)
self.assertEqual(ast.dump(node, annotate_fields=False),
"Module([Expr(Call(Name('spam', Load()), [Name('eggs', Load()), "
"Str('and cheese')], [], None, None))])"
)
self.assertEqual(ast.dump(node, include_attributes=True),
"Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load(), "
"lineno=1, col_offset=0), args=[Name(id='eggs', ctx=Load(), "
"lineno=1, col_offset=5), Str(s='and cheese', lineno=1, "
"col_offset=11)], keywords=[], starargs=None, kwargs=None, "
"lineno=1, col_offset=0), lineno=1, col_offset=0)])"
)
def test_copy_location(self):
src = ast.parse('1 + 1', mode='eval')
src.body.right = ast.copy_location(ast.Num(2), src.body.right)
self.assertEqual(ast.dump(src, include_attributes=True),
'Expression(body=BinOp(left=Num(n=1, lineno=1, col_offset=0), '
'op=Add(), right=Num(n=2, lineno=1, col_offset=4), lineno=1, '
'col_offset=0))'
)
def test_fix_missing_locations(self):
src = ast.parse('write("spam")')
src.body.append(ast.Expr(ast.Call(ast.Name('spam', ast.Load()),
[ast.Str('eggs')], [], None, None)))
self.assertEqual(src, ast.fix_missing_locations(src))
self.assertEqual(ast.dump(src, include_attributes=True),
"Module(body=[Expr(value=Call(func=Name(id='write', ctx=Load(), "
"lineno=1, col_offset=0), args=[Str(s='spam', lineno=1, "
"col_offset=6)], keywords=[], starargs=None, kwargs=None, "
"lineno=1, col_offset=0), lineno=1, col_offset=0), "
"Expr(value=Call(func=Name(id='spam', ctx=Load(), lineno=1, "
"col_offset=0), args=[Str(s='eggs', lineno=1, col_offset=0)], "
"keywords=[], starargs=None, kwargs=None, lineno=1, "
"col_offset=0), lineno=1, col_offset=0)])"
)
def test_increment_lineno(self):
src = ast.parse('1 + 1', mode='eval')
self.assertEqual(ast.increment_lineno(src, n=3), src)
self.assertEqual(ast.dump(src, include_attributes=True),
'Expression(body=BinOp(left=Num(n=1, lineno=4, col_offset=0), '
'op=Add(), right=Num(n=1, lineno=4, col_offset=4), lineno=4, '
'col_offset=0))'
)
# issue10869: do not increment lineno of root twice
src = ast.parse('1 + 1', mode='eval')
self.assertEqual(ast.increment_lineno(src.body, n=3), src.body)
self.assertEqual(ast.dump(src, include_attributes=True),
'Expression(body=BinOp(left=Num(n=1, lineno=4, col_offset=0), '
'op=Add(), right=Num(n=1, lineno=4, col_offset=4), lineno=4, '
'col_offset=0))'
)
def test_iter_fields(self):
node = ast.parse('foo()', mode='eval')
d = dict(ast.iter_fields(node.body))
self.assertEqual(d.pop('func').id, 'foo')
self.assertEqual(d, {'keywords': [], 'kwargs': None,
'args': [], 'starargs': None})
def test_iter_child_nodes(self):
node = ast.parse("spam(23, 42, eggs='leek')", mode='eval')
self.assertEqual(len(list(ast.iter_child_nodes(node.body))), 4)
iterator = ast.iter_child_nodes(node.body)
self.assertEqual(next(iterator).id, 'spam')
self.assertEqual(next(iterator).n, 23)
self.assertEqual(next(iterator).n, 42)
self.assertEqual(ast.dump(next(iterator)),
"keyword(arg='eggs', value=Str(s='leek'))"
)
def test_get_docstring(self):
node = ast.parse('def foo():\n """line one\n line two"""')
self.assertEqual(ast.get_docstring(node.body[0]),
'line one\nline two')
def test_literal_eval(self):
self.assertEqual(ast.literal_eval('[1, 2, 3]'), [1, 2, 3])
self.assertEqual(ast.literal_eval('{"foo": 42}'), {"foo": 42})
self.assertEqual(ast.literal_eval('(True, False, None)'), (True, False, None))
self.assertRaises(ValueError, ast.literal_eval, 'foo()')
def test_literal_eval_issue4907(self):
self.assertEqual(ast.literal_eval('2j'), 2j)
self.assertEqual(ast.literal_eval('10 + 2j'), 10 + 2j)
self.assertEqual(ast.literal_eval('1.5 - 2j'), 1.5 - 2j)
self.assertRaises(ValueError, ast.literal_eval, '2 + (3 + 4j)')
def test_main():
with test_support.check_py3k_warnings(("backquote not supported",
SyntaxWarning)):
test_support.run_unittest(AST_Tests, ASTHelpers_Test)
def main():
if __name__ != '__main__':
return
if sys.argv[1:] == ['-g']:
for statements, kind in ((exec_tests, "exec"), (single_tests, "single"),
(eval_tests, "eval")):
print kind+"_results = ["
for s in statements:
print repr(to_tuple(compile(s, "?", kind, 0x400)))+","
print "]"
print "main()"
raise SystemExit
test_main()
#### EVERYTHING BELOW IS GENERATED #####
exec_results = [
('Module', [('Expr', (1, 0), ('Name', (1, 0), 'None', ('Load',)))]),
('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [], None, None, []), [('Pass', (1, 9))], [])]),
('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [('Name', (1, 6), 'a', ('Param',))], None, None, []), [('Pass', (1, 10))], [])]),
('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [('Name', (1, 6), 'a', ('Param',))], None, None, [('Num', (1, 8), 0)]), [('Pass', (1, 12))], [])]),
('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [], 'args', None, []), [('Pass', (1, 14))], [])]),
('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [], None, 'kwargs', []), [('Pass', (1, 17))], [])]),
('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [('Name', (1, 6), 'a', ('Param',)), ('Name', (1, 9), 'b', ('Param',)), ('Name', (1, 14), 'c', ('Param',)), ('Name', (1, 22), 'd', ('Param',)), ('Name', (1, 28), 'e', ('Param',))], 'args', 'kwargs', [('Num', (1, 11), 1), ('Name', (1, 16), 'None', ('Load',)), ('List', (1, 24), [], ('Load',)), ('Dict', (1, 30), [], [])]), [('Pass', (1, 52))], [])]),
('Module', [('ClassDef', (1, 0), 'C', [], [('Pass', (1, 8))], [])]),
('Module', [('ClassDef', (1, 0), 'C', [('Name', (1, 8), 'object', ('Load',))], [('Pass', (1, 17))], [])]),
('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [], None, None, []), [('Return', (1, 8), ('Num', (1, 15), 1))], [])]),
('Module', [('Delete', (1, 0), [('Name', (1, 4), 'v', ('Del',))])]),
('Module', [('Assign', (1, 0), [('Name', (1, 0), 'v', ('Store',))], ('Num', (1, 4), 1))]),
('Module', [('AugAssign', (1, 0), ('Name', (1, 0), 'v', ('Store',)), ('Add',), ('Num', (1, 5), 1))]),
('Module', [('Print', (1, 0), ('Name', (1, 8), 'f', ('Load',)), [('Num', (1, 11), 1)], False)]),
('Module', [('For', (1, 0), ('Name', (1, 4), 'v', ('Store',)), ('Name', (1, 9), 'v', ('Load',)), [('Pass', (1, 11))], [])]),
('Module', [('While', (1, 0), ('Name', (1, 6), 'v', ('Load',)), [('Pass', (1, 8))], [])]),
('Module', [('If', (1, 0), ('Name', (1, 3), 'v', ('Load',)), [('Pass', (1, 5))], [])]),
('Module', [('Raise', (1, 0), ('Name', (1, 6), 'Exception', ('Load',)), ('Str', (1, 17), 'string'), None)]),
('Module', [('TryExcept', (1, 0), [('Pass', (2, 2))], [('ExceptHandler', (3, 0), ('Name', (3, 7), 'Exception', ('Load',)), None, [('Pass', (4, 2))])], [])]),
('Module', [('TryFinally', (1, 0), [('Pass', (2, 2))], [('Pass', (4, 2))])]),
('Module', [('Assert', (1, 0), ('Name', (1, 7), 'v', ('Load',)), None)]),
('Module', [('Import', (1, 0), [('alias', 'sys', None)])]),
('Module', [('ImportFrom', (1, 0), 'sys', [('alias', 'v', None)], 0)]),
('Module', [('Exec', (1, 0), ('Str', (1, 5), 'v'), None, None)]),
('Module', [('Global', (1, 0), ['v'])]),
('Module', [('Expr', (1, 0), ('Num', (1, 0), 1))]),
('Module', [('Pass', (1, 0))]),
('Module', [('Break', (1, 0))]),
('Module', [('Continue', (1, 0))]),
('Module', [('For', (1, 0), ('Tuple', (1, 4), [('Name', (1, 4), 'a', ('Store',)), ('Name', (1, 6), 'b', ('Store',))], ('Store',)), ('Name', (1, 11), 'c', ('Load',)), [('Pass', (1, 14))], [])]),
('Module', [('Expr', (1, 0), ('ListComp', (1, 1), ('Tuple', (1, 2), [('Name', (1, 2), 'a', ('Load',)), ('Name', (1, 4), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11), [('Name', (1, 11), 'a', ('Store',)), ('Name', (1, 13), 'b', ('Store',))], ('Store',)), ('Name', (1, 18), 'c', ('Load',)), [])]))]),
('Module', [('Expr', (1, 0), ('GeneratorExp', (1, 1), ('Tuple', (1, 2), [('Name', (1, 2), 'a', ('Load',)), ('Name', (1, 4), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11), [('Name', (1, 11), 'a', ('Store',)), ('Name', (1, 13), 'b', ('Store',))], ('Store',)), ('Name', (1, 18), 'c', ('Load',)), [])]))]),
('Module', [('Expr', (1, 0), ('GeneratorExp', (1, 1), ('Tuple', (1, 2), [('Name', (1, 2), 'a', ('Load',)), ('Name', (1, 4), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 12), [('Name', (1, 12), 'a', ('Store',)), ('Name', (1, 14), 'b', ('Store',))], ('Store',)), ('Name', (1, 20), 'c', ('Load',)), [])]))]),
('Module', [('Expr', (1, 0), ('GeneratorExp', (2, 4), ('Tuple', (3, 4), [('Name', (3, 4), 'Aa', ('Load',)), ('Name', (5, 7), 'Bb', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (8, 4), [('Name', (8, 4), 'Aa', ('Store',)), ('Name', (10, 4), 'Bb', ('Store',))], ('Store',)), ('Name', (10, 10), 'Cc', ('Load',)), [])]))]),
('Module', [('Expr', (1, 0), ('DictComp', (1, 1), ('Name', (1, 1), 'a', ('Load',)), ('Name', (1, 5), 'b', ('Load',)), [('comprehension', ('Name', (1, 11), 'w', ('Store',)), ('Name', (1, 16), 'x', ('Load',)), []), ('comprehension', ('Name', (1, 22), 'm', ('Store',)), ('Name', (1, 27), 'p', ('Load',)), [('Name', (1, 32), 'g', ('Load',))])]))]),
('Module', [('Expr', (1, 0), ('DictComp', (1, 1), ('Name', (1, 1), 'a', ('Load',)), ('Name', (1, 5), 'b', ('Load',)), [('comprehension', ('Tuple', (1, 11), [('Name', (1, 11), 'v', ('Store',)), ('Name', (1, 13), 'w', ('Store',))], ('Store',)), ('Name', (1, 18), 'x', ('Load',)), [])]))]),
('Module', [('Expr', (1, 0), ('SetComp', (1, 1), ('Name', (1, 1), 'r', ('Load',)), [('comprehension', ('Name', (1, 7), 'l', ('Store',)), ('Name', (1, 12), 'x', ('Load',)), [('Name', (1, 17), 'g', ('Load',))])]))]),
('Module', [('Expr', (1, 0), ('SetComp', (1, 1), ('Name', (1, 1), 'r', ('Load',)), [('comprehension', ('Tuple', (1, 7), [('Name', (1, 7), 'l', ('Store',)), ('Name', (1, 9), 'm', ('Store',))], ('Store',)), ('Name', (1, 14), 'x', ('Load',)), [])]))]),
]
single_results = [
('Interactive', [('Expr', (1, 0), ('BinOp', (1, 0), ('Num', (1, 0), 1), ('Add',), ('Num', (1, 2), 2)))]),
]
eval_results = [
('Expression', ('Name', (1, 0), 'None', ('Load',))),
('Expression', ('BoolOp', (1, 0), ('And',), [('Name', (1, 0), 'a', ('Load',)), ('Name', (1, 6), 'b', ('Load',))])),
('Expression', ('BinOp', (1, 0), ('Name', (1, 0), 'a', ('Load',)), ('Add',), ('Name', (1, 4), 'b', ('Load',)))),
('Expression', ('UnaryOp', (1, 0), ('Not',), ('Name', (1, 4), 'v', ('Load',)))),
('Expression', ('Lambda', (1, 0), ('arguments', [], None, None, []), ('Name', (1, 7), 'None', ('Load',)))),
('Expression', ('Dict', (1, 0), [('Num', (1, 2), 1)], [('Num', (1, 4), 2)])),
('Expression', ('Dict', (1, 0), [], [])),
('Expression', ('Set', (1, 0), [('Name', (1, 1), 'None', ('Load',))])),
('Expression', ('Dict', (1, 0), [('Num', (2, 6), 1)], [('Num', (4, 10), 2)])),
('Expression', ('ListComp', (1, 1), ('Name', (1, 1), 'a', ('Load',)), [('comprehension', ('Name', (1, 7), 'b', ('Store',)), ('Name', (1, 12), 'c', ('Load',)), [('Name', (1, 17), 'd', ('Load',))])])),
('Expression', ('GeneratorExp', (1, 1), ('Name', (1, 1), 'a', ('Load',)), [('comprehension', ('Name', (1, 7), 'b', ('Store',)), ('Name', (1, 12), 'c', ('Load',)), [('Name', (1, 17), 'd', ('Load',))])])),
('Expression', ('Compare', (1, 0), ('Num', (1, 0), 1), [('Lt',), ('Lt',)], [('Num', (1, 4), 2), ('Num', (1, 8), 3)])),
('Expression', ('Call', (1, 0), ('Name', (1, 0), 'f', ('Load',)), [('Num', (1, 2), 1), ('Num', (1, 4), 2)], [('keyword', 'c', ('Num', (1, 8), 3))], ('Name', (1, 11), 'd', ('Load',)), ('Name', (1, 15), 'e', ('Load',)))),
('Expression', ('Repr', (1, 0), ('Name', (1, 1), 'v', ('Load',)))),
('Expression', ('Num', (1, 0), 10L)),
('Expression', ('Str', (1, 0), 'string')),
('Expression', ('Attribute', (1, 0), ('Name', (1, 0), 'a', ('Load',)), 'b', ('Load',))),
('Expression', ('Subscript', (1, 0), ('Name', (1, 0), 'a', ('Load',)), ('Slice', ('Name', (1, 2), 'b', ('Load',)), ('Name', (1, 4), 'c', ('Load',)), None), ('Load',))),
('Expression', ('Name', (1, 0), 'v', ('Load',))),
('Expression', ('List', (1, 0), [('Num', (1, 1), 1), ('Num', (1, 3), 2), ('Num', (1, 5), 3)], ('Load',))),
('Expression', ('List', (1, 0), [], ('Load',))),
('Expression', ('Tuple', (1, 0), [('Num', (1, 0), 1), ('Num', (1, 2), 2), ('Num', (1, 4), 3)], ('Load',))),
('Expression', ('Tuple', (1, 1), [('Num', (1, 1), 1), ('Num', (1, 3), 2), ('Num', (1, 5), 3)], ('Load',))),
('Expression', ('Tuple', (1, 0), [], ('Load',))),
('Expression', ('Call', (1, 0), ('Attribute', (1, 0), ('Attribute', (1, 0), ('Attribute', (1, 0), ('Name', (1, 0), 'a', ('Load',)), 'b', ('Load',)), 'c', ('Load',)), 'd', ('Load',)), [('Subscript', (1, 8), ('Attribute', (1, 8), ('Name', (1, 8), 'a', ('Load',)), 'b', ('Load',)), ('Slice', ('Num', (1, 12), 1), ('Num', (1, 14), 2), None), ('Load',))], [], None, None)),
]
main()
| mit |
jodygarnett/qgis-geoserver-plugin | src/geoserverexplorer/gui/gsoperations.py | 1 | 6538 | from PyQt4 import QtCore
from qgis.core import *
from geoserverexplorer.qgis import layers as qgislayers
from geoserverexplorer.qgis.catalog import CatalogWrapper
from geoserverexplorer.gui.confirm import publishLayer
from geoserverexplorer.gui.dialogs.projectdialog import PublishProjectDialog
from geoserver.catalog import ConflictingDataError
from geoserverexplorer.gui.dialogs.layerdialog import PublishLayersDialog
def publishDraggedGroup(explorer, groupItem, catalog, workspace):
groupName = groupItem.element
groups = qgislayers.getGroups()
group = groups[groupName]
    gslayers = [layer.name for layer in catalog.get_layers()]
missing = []
overwrite = bool(QtCore.QSettings().value("/GeoServer/Settings/GeoServer/OverwriteGroupLayers", True, bool))
for layer in group:
if layer.name() not in gslayers or overwrite:
missing.append(layer)
if missing:
explorer.setProgressMaximum(len(missing), "Publish layers")
progress = 0
cat = CatalogWrapper(catalog)
for layer in missing:
explorer.setProgress(progress)
explorer.run(cat.publishLayer,
None,
[],
layer, workspace, True)
progress += 1
explorer.setProgress(progress)
explorer.resetActivity()
names = [layer.name() for layer in group]
layergroup = catalog.create_layergroup(groupName, names, names)
explorer.run(catalog.save, "Create layer group from group '" + groupName + "'",
[], layergroup)
def publishDraggedLayer(explorer, layer, workspace):
cat = workspace.catalog
cat = CatalogWrapper(cat)
ret = explorer.run(publishLayer,
"Publish layer from layer '" + layer.name() + "'",
[],
cat, layer, workspace)
return ret
def addDraggedLayerToGroup(explorer, layer, groupItem):
group = groupItem.element
styles = group.styles
layers = group.layers
if layer.name not in layers:
layers.append(layer.name)
styles.append(layer.default_style.name)
group.dirty.update(layers = layers, styles = styles)
explorer.run(layer.catalog.save,
"Update group '" + group.name + "'",
[groupItem],
group)
def addDraggedUrisToWorkspace(uris, catalog, workspace, tree, explorer):
if uris:
if len(uris) > 1:
explorer.setProgressMaximum(len(uris))
for i, uri in enumerate(uris):
if isinstance(uri, basestring):
layerName = QtCore.QFileInfo(uri).completeBaseName()
layer = QgsRasterLayer(uri, layerName)
else:
layer = QgsRasterLayer(uri.uri, uri.name)
if not layer.isValid() or layer.type() != QgsMapLayer.RasterLayer:
if isinstance(uri, basestring):
layerName = QtCore.QFileInfo(uri).completeBaseName()
layer = QgsVectorLayer(uri, layerName, "ogr")
else:
layer = QgsVectorLayer(uri.uri, uri.name, uri.providerKey)
if not layer.isValid() or layer.type() != QgsMapLayer.VectorLayer:
layer.deleteLater()
name = uri if isinstance(uri, basestring) else uri.uri
explorer.setError("Error reading file {} or it is not a valid layer file".format(name))
else:
if not publishDraggedLayer(explorer, layer, workspace):
return []
else:
if not publishDraggedLayer(explorer, layer, workspace):
return []
            explorer.setProgress(i + 1)
        explorer.resetActivity()
return [tree.findAllItems(catalog)[0]]
else:
return []
def addDraggedStyleToLayer(tree, explorer, styleItem, layerItem):
catalog = layerItem.element.catalog
catItem = tree.findFirstItem(catalog)
style = styleItem.element
layer = layerItem.element
if not hasattr(layer, "default_style") or layer.default_style is None:
# if default style is missing, make dragged style the layer's default
# without a default style, some GeoServer operations may fail
layer.default_style = style
else:
# add to layer's additional styles
styles = layer.styles
styles.append(style)
layer.styles = styles
explorer.run(catalog.save,
"Add style '" + style.name + "' to layer '" + layer.name + "'",
[catItem],
layer)
def publishProject(tree, explorer, catalog):
layers = qgislayers.getAllLayers()
dlg = PublishProjectDialog(catalog)
dlg.exec_()
if not dlg.ok:
return
workspace = dlg.workspace
groupName = dlg.groupName
explorer.setProgressMaximum(len(layers), "Publish layers")
progress = 0
cat = CatalogWrapper(catalog)
for layer in layers:
explorer.setProgress(progress)
explorer.run(publishLayer,
None,
[],
cat, layer, workspace)
progress += 1
explorer.setProgress(progress)
explorer.resetActivity()
groups = qgislayers.getGroups()
for group in groups:
names = [layer.name() for layer in groups[group]]
try:
layergroup = catalog.create_layergroup(group, names, names)
explorer.run(catalog.save, "Create layer group '" + group + "'",
[], layergroup)
except ConflictingDataError, e:
explorer.setWarning(str(e))
if groupName is not None:
names = [layer.name() for layer in layers]
layergroup = catalog.create_layergroup(groupName, names, names)
explorer.run(catalog.save, "Create global layer group",
[], layergroup)
tree.findAllItems(catalog)[0].refreshContent(explorer)
explorer.resetActivity()
def publishLayers(tree, explorer, catalog):
dlg = PublishLayersDialog(catalog)
dlg.exec_()
if dlg.topublish is None:
return
cat = CatalogWrapper(catalog)
progress = 0
explorer.setProgressMaximum(len(dlg.topublish), "Publish layers")
for layer, workspace, name in dlg.topublish:
explorer.run(cat.publishLayer,
None,
[],
layer, workspace, True, name)
progress += 1
explorer.setProgress(progress)
catItem = tree.findAllItems(catalog)[0]
catItem.refreshContent(explorer)
explorer.resetActivity()
| gpl-2.0 |
fgesora/odoo | addons/sale_crm/__openerp__.py | 260 | 2036 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Opportunity to Quotation',
'version': '1.0',
'category': 'Hidden',
'description': """
This module adds a shortcut on one or several opportunity cases in the CRM.
===========================================================================
This shortcut allows you to generate a sales order based on the selected case.
If several cases are open (a list), it generates one sales order per case.
The case is then closed and linked to the generated sales order.
We suggest you install this module if you have installed both the sale and the
crm modules.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/crm',
'depends': ['sale', 'crm', 'web_kanban_gauge'],
'data': [
'wizard/crm_make_sale_view.xml',
'sale_crm_view.xml',
'security/sale_crm_security.xml',
'security/ir.model.access.csv',
],
'demo': [],
'test': ['test/sale_crm.yml'],
'installable': True,
'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sbrunner/QGIS | tests/src/python/test_qgsserver_wfst.py | 3 | 12177 |
# -*- coding: utf-8 -*-
"""
Tests for WFS-T provider using QGIS Server through qgis_wrapped_server.py.
This is an integration test for QGIS Desktop WFS-T provider and QGIS Server
WFS-T that checks if QGIS can talk to and understand itself.
The test uses testdata/wfs_transactional/wfs_transactional.qgs and three
initially empty shapefile layers with points, lines and polygons.
All WFS-T calls are executed through the QGIS WFS data provider.
The three layers are
1. populated with WFS-T
2. checked for geometry and attributes
3. modified with WFS-T
4. checked for geometry and attributes
5. emptied with WFS-T calls to delete
From build dir, run: ctest -R PyQgsServerWFST -V
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Alessandro Pasotti'
__date__ = '05/15/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
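# A minimal sketch of the round trip this module exercises, assuming a server
# already listening on an illustrative PORT and serving PROJECT_PATH (both
# hypothetical placeholder names); every WFS-T Insert/Update/Delete goes
# through the QGIS "WFS" data provider rather than hand-built HTTP requests:
#
#   uri = ("srsname='EPSG:4326' typename='test_point' version='auto' "
#          "url='http://127.0.0.1:%s/?map=%s'" % (PORT, PROJECT_PATH))
#   wfs = QgsVectorLayer(uri, 'wfs_test_point', 'WFS')
#   wfs.dataProvider().addFeatures([feat])                    # 1. populate
#   found = next(wfs.dataProvider().getFeatures())            # 2./4. check
#   wfs.dataProvider().changeGeometryValues({found.id(): g})  # 3. modify
#   wfs.dataProvider().deleteFeatures([found.id()])           # 5. empty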
import os
import sys
import re
import subprocess
from shutil import copytree, rmtree
import tempfile
from utilities import unitTestDataPath, waitServer
from qgis.core import (
QgsVectorLayer,
QgsFeature,
QgsGeometry,
QgsPointXY,
QgsRectangle,
QgsFeatureRequest,
QgsExpression,
)
from qgis.testing import (
start_app,
unittest,
)
try:
QGIS_SERVER_WFST_PORT = os.environ['QGIS_SERVER_WFST_PORT']
except KeyError:
QGIS_SERVER_WFST_PORT = '0' # Auto
qgis_app = start_app()
class TestWFST(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
cls.port = QGIS_SERVER_WFST_PORT
# Create tmp folder
cls.temp_path = tempfile.mkdtemp()
cls.testdata_path = cls.temp_path + '/' + 'wfs_transactional' + '/'
copytree(unitTestDataPath('wfs_transactional') + '/',
cls.temp_path + '/' + 'wfs_transactional')
cls.project_path = cls.temp_path + '/' + 'wfs_transactional' + '/' + \
'wfs_transactional.qgs'
assert os.path.exists(cls.project_path), "Project not found: %s" % \
cls.project_path
# Clean env just to be sure
env_vars = ['QUERY_STRING', 'QGIS_PROJECT_FILE']
for ev in env_vars:
try:
del os.environ[ev]
except KeyError:
pass
# Clear all test layers
for ln in ['test_point', 'test_polygon', 'test_linestring']:
cls._clearLayer(ln)
os.environ['QGIS_SERVER_PORT'] = str(cls.port)
server_path = os.path.dirname(os.path.realpath(__file__)) + \
'/qgis_wrapped_server.py'
cls.server = subprocess.Popen([sys.executable, server_path],
env=os.environ, stdout=subprocess.PIPE)
line = cls.server.stdout.readline()
cls.port = int(re.findall(b':(\d+)', line)[0])
assert cls.port != 0
# Wait for the server process to start
assert waitServer('http://127.0.0.1:%s' % cls.port), "Server is not responding!"
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
cls.server.terminate()
cls.server.wait()
del cls.server
# Clear all test layers
for ln in ['test_point', 'test_polygon', 'test_linestring']:
cls._clearLayer(ln)
rmtree(cls.temp_path)
def setUp(self):
"""Run before each test."""
pass
def tearDown(self):
"""Run after each test."""
pass
@classmethod
def _clearLayer(cls, layer_name):
"""
Delete all features from a vector layer
"""
layer = cls._getLayer(layer_name)
layer.startEditing()
layer.deleteFeatures([f.id() for f in layer.getFeatures()])
layer.commitChanges()
assert layer.featureCount() == 0
@classmethod
def _getLayer(cls, layer_name):
"""
OGR Layer factory
"""
path = cls.testdata_path + layer_name + '.shp'
layer = QgsVectorLayer(path, layer_name, "ogr")
assert layer.isValid()
return layer
@classmethod
def _getWFSLayer(cls, type_name, layer_name=None):
"""
WFS layer factory
"""
if layer_name is None:
layer_name = 'wfs_' + type_name
parms = {
'srsname': 'EPSG:4326',
'typename': type_name,
'url': 'http://127.0.0.1:%s/?map=%s' % (cls.port,
cls.project_path),
'version': 'auto',
'table': '',
#'sql': '',
}
uri = ' '.join([("%s='%s'" % (k, v)) for k, v in list(parms.items())])
wfs_layer = QgsVectorLayer(uri, layer_name, 'WFS')
assert wfs_layer.isValid()
return wfs_layer
@classmethod
def _getFeatureByAttribute(cls, layer, attr_name, attr_value):
"""
Find the feature and return it, raise exception if not found
"""
request = QgsFeatureRequest(QgsExpression("%s=%s" % (attr_name,
attr_value)))
try:
return next(layer.dataProvider().getFeatures(request))
except StopIteration:
raise Exception("Wrong attributes in WFS layer %s" %
layer.name())
def _checkAddFeatures(self, wfs_layer, layer, features):
"""
Check features were added
"""
wfs_layer.dataProvider().addFeatures(features)
layer = self._getLayer(layer.name())
self.assertTrue(layer.isValid())
self.assertEqual(layer.featureCount(), len(features))
self.assertEqual(wfs_layer.dataProvider().featureCount(), len(features))
def _checkUpdateFeatures(self, wfs_layer, old_features, new_features):
"""
Check features can be updated
"""
for i in range(len(old_features)):
f = self._getFeatureByAttribute(wfs_layer, 'id', old_features[i]['id'])
self.assertTrue(wfs_layer.dataProvider().changeGeometryValues({f.id(): new_features[i].geometry()}))
self.assertTrue(wfs_layer.dataProvider().changeAttributeValues({f.id(): {0: new_features[i]['id']}}))
self.assertTrue(wfs_layer.dataProvider().changeAttributeValues({f.id(): {1: new_features[i]['name']}}))
def _checkMatchFeatures(self, wfs_layer, features):
"""
Check feature attributes and geometry match
"""
for f in features:
wf = self._getFeatureByAttribute(wfs_layer, 'id', f['id'])
self.assertEqual(wf.geometry().asWkt(),
f.geometry().asWkt())
self.assertEqual(f['name'], wf['name'])
def _checkDeleteFeatures(self, layer, features):
"""
Delete features
"""
ids = []
for f in features:
wf = self._getFeatureByAttribute(layer, 'id', f['id'])
ids.append(wf.id())
self.assertTrue(layer.dataProvider().deleteFeatures(ids))
def _testLayer(self, wfs_layer, layer, old_features, new_features):
"""
Perform all test steps on the layer.
"""
self.assertEqual(wfs_layer.featureCount(), 0)
self._checkAddFeatures(wfs_layer, layer, old_features)
self._checkMatchFeatures(wfs_layer, old_features)
self.assertEqual(wfs_layer.dataProvider().featureCount(),
len(old_features))
self._checkUpdateFeatures(wfs_layer, old_features, new_features)
self._checkMatchFeatures(wfs_layer, new_features)
self._checkDeleteFeatures(wfs_layer, new_features)
self.assertEqual(wfs_layer.dataProvider().featureCount(), 0)
def testWFSPoints(self):
"""
Adds some points, then check and clear all
"""
layer_name = 'test_point'
layer = self._getLayer(layer_name)
wfs_layer = self._getWFSLayer(layer_name)
feat1 = QgsFeature(wfs_layer.fields())
feat1['id'] = 11
feat1.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(9, 45)))
feat2 = QgsFeature(wfs_layer.fields())
feat2.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(9.5, 45.5)))
feat2['id'] = 12
old_features = [feat1, feat2]
# Change feat1
new_feat1 = QgsFeature(wfs_layer.fields())
new_feat1['id'] = 121
new_feat1.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(10, 46)))
new_features = [new_feat1, feat2]
self._testLayer(wfs_layer, layer, old_features, new_features)
def testWFSPointsMultipleEdits(self):
"""
Adds some points, then check.
Modify 2 points, then checks and clear all
"""
layer_name = 'test_point'
layer = self._getLayer(layer_name)
wfs_layer = self._getWFSLayer(layer_name)
feat1 = QgsFeature(wfs_layer.fields())
feat1['id'] = 11
feat1['name'] = 'name 11'
feat1.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(9, 45)))
feat2 = QgsFeature(wfs_layer.fields())
feat2.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(9.5, 45.5)))
feat2['id'] = 12
feat2['name'] = 'name 12'
old_features = [feat1, feat2]
# Change feat1 and feat2
new_feat1 = QgsFeature(wfs_layer.fields())
new_feat1['id'] = 121
new_feat1['name'] = 'name 121'
new_feat1.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(10, 46)))
new_feat2 = QgsFeature(wfs_layer.fields())
new_feat2['id'] = 122
new_feat2['name'] = 'name 122'
new_feat2.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(10.5, 47)))
new_features = [new_feat1, new_feat2]
self._testLayer(wfs_layer, layer, old_features, new_features)
def testWFSPolygons(self):
"""
Adds some polygons, then check and clear all
"""
layer_name = 'test_polygon'
layer = self._getLayer(layer_name)
wfs_layer = self._getWFSLayer(layer_name)
feat1 = QgsFeature(wfs_layer.fields())
feat1['id'] = 11
feat1['name'] = 'name 11'
feat1.setGeometry(QgsGeometry.fromRect(QgsRectangle(QgsPointXY(9, 45), QgsPointXY(10, 46))))
feat2 = QgsFeature(wfs_layer.fields())
feat2.setGeometry(QgsGeometry.fromRect(QgsRectangle(QgsPointXY(9.5, 45.5), QgsPointXY(10.5, 46.5))))
feat2['id'] = 12
feat2['name'] = 'name 12'
old_features = [feat1, feat2]
# Change feat1
new_feat1 = QgsFeature(wfs_layer.fields())
new_feat1['id'] = 121
new_feat1['name'] = 'name 121'
new_feat1.setGeometry(QgsGeometry.fromRect(QgsRectangle(QgsPointXY(10, 46), QgsPointXY(11.5, 47.5))))
new_features = [new_feat1, feat2]
self._testLayer(wfs_layer, layer, old_features, new_features)
def testWFSLineStrings(self):
"""
Adds some lines, then check and clear all
"""
layer_name = 'test_linestring'
layer = self._getLayer(layer_name)
wfs_layer = self._getWFSLayer(layer_name)
feat1 = QgsFeature(wfs_layer.fields())
feat1['id'] = 11
feat1['name'] = 'name 11'
feat1.setGeometry(QgsGeometry.fromPolylineXY([QgsPointXY(9, 45), QgsPointXY(10, 46)]))
feat2 = QgsFeature(wfs_layer.fields())
feat2.setGeometry(QgsGeometry.fromPolylineXY([QgsPointXY(9.5, 45.5), QgsPointXY(10.5, 46.5)]))
feat2['id'] = 12
feat2['name'] = 'name 12'
old_features = [feat1, feat2]
# Change feat1
new_feat1 = QgsFeature(wfs_layer.fields())
new_feat1['id'] = 121
new_feat1['name'] = 'name 121'
new_feat1.setGeometry(QgsGeometry.fromPolylineXY([QgsPointXY(9.8, 45.8), QgsPointXY(10.8, 46.8)]))
new_features = [new_feat1, feat2]
self._testLayer(wfs_layer, layer, old_features, new_features)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
mrquim/repository.mrquim | repo/plugin.video.live.streamspro/_ytplist.py | 173 | 4161 | import urllib
import urllib2,json
import xbmcvfs
import requests,time
import os,xbmc,xbmcaddon,xbmcgui,re
addon = xbmcaddon.Addon('plugin.video.live.streamspro')
profile = xbmc.translatePath(addon.getAddonInfo('profile').decode('utf-8'))
cacheDir = os.path.join(profile, 'cachedir')
clean_cache=os.path.join(cacheDir,'cleancacheafter1month')
headers=dict({'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; rv:32.0) Gecko/20100101 Firefox/32.0'})
if not cacheDir.startswith(('smb://', 'nfs://', 'upnp://', 'ftp://')) and not os.path.isdir(cacheDir):
os.mkdir(cacheDir)
if xbmcvfs.exists(clean_cache) and (time.time()-os.path.getmtime(clean_cache) > 60*60*24*30):
print 'time of creation of ff',str(time.time()-os.path.getmtime(clean_cache))
import shutil
shutil.rmtree(cacheDir)
else:
with open(clean_cache,'w') as f:
f.write('')
utubeid = 'www.youtube.*?v(?:=|%3D)([0-9A-Za-z_-]{11})'
def YoUTube(page_data,youtube=None,duration=None,max_page=20,nosave=None):
pDialog = xbmcgui.DialogProgress()
pDialog.create('Updating list', 'Downloading ...')
base_yt_url ='http://gdata.youtube.com/feeds/api'
if 'search' in page_data:
youtube = youtube.replace(' ','+')#Lana Del Rey
build_url= base_yt_url + '/videos?q=%s&max-results=50&v=2&alt=json&orderby=published&start-index=%s'
if addon.getSetting('searchlongvideos') == 'true': #duration: #medium or long
build_url = base_yt_url + '/videos?q=%s&max-results=20&v=2&alt=json&duration=long&start-index=%s'
else:
build_url = 'http://www.youtube.com/watch?v=%s' %page_data
count = 1
    allurls = {}
for i in range(1,max_page):
        if 'search' in page_data:
            url = build_url % (youtube, str(count))
        else:
            # build_url is already a complete watch URL in this branch
            url = build_url
#print url
try:
content = cache(url,int(addon.getSetting("Youtube")))
print len(content)
jcontent = json.loads(content)
entry = jcontent['feed']['entry']
except Exception:
break
for myUrl in entry:
count += 1
allitem = 'item' + str(count)
item = {}
item['title']= removeNonAscii(myUrl['title']['$t']).encode('utf-8')
item['date']= myUrl['published']['$t'].encode('utf-8')
try:
item['desc']= removeNonAscii(myUrl['media$group']['media$description']['$t']).encode('utf-8')
except Exception:
                item['desc'] = 'UNAVAILABLE'
link = myUrl['link'][0]['href'].encode('utf-8','ignore')
item['url']= re_me(link,utubeid)
allurls[allitem] = item
print len(allurls)
    pDialog.close()
    if nosave:
        return allurls
def re_me(data, re_patten):
match = ''
m = re.search(re_patten, data,re.I)
if m != None:
match = m.group(1)
else:
match = ''
return match
def notification(header="", message="", sleep=3000):
""" Will display a notification dialog with the specified header and message,
    in addition you can set the length of time it displays in milliseconds.
"""
xbmc.executebuiltin("XBMC.Notification(%s,%s,%i)" % ( header, message, sleep ))
def removeNonAscii(s): return "".join(filter(lambda x: ord(x)<128, s))
def makeRequest(url,referer=None,post=None,body={}):
if referer:
        headers.update({'Referer': referer})
    req = urllib2.Request(url, None, headers)
    response = urllib2.urlopen(req)
    data = response.read()
    response.close()
return data
# from AddonScriptorde X:\plugin.video.my_music_tv\default.py
def cache(url, duration=0):
cacheFile = os.path.join(cacheDir, (''.join(c for c in unicode(url, 'utf-8') if c not in '/\\:?"*|<>')).strip())
if os.path.exists(cacheFile) and duration!=0 and (time.time()-os.path.getmtime(cacheFile) < 60*60*24*duration):
fh = xbmcvfs.File(cacheFile, 'r')
content = fh.read()
fh.close()
return content
else:
content = makeRequest(url)
fh = xbmcvfs.File(cacheFile, 'w')
fh.write(content)
fh.close()
return content
| gpl-2.0 |
Hybrid-Cloud/Hybrid-Cloud-Patches-For-Tricircle | hybrid-cloud/cinder/volume/api.py | 4 | 58671 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to volumes.
"""
import collections
import datetime
import functools
from oslo.config import cfg
import six
from cinder import context
from cinder.db import base
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _
from cinder.image import glance
from cinder import keymgr
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder.openstack.common import uuidutils
import cinder.policy
from cinder import quota
from cinder import quota_utils
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder import utils
from cinder.volume.flows.api import create_volume
from cinder.volume import qos_specs
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types
volume_host_opt = cfg.BoolOpt('snapshot_same_host',
default=True,
help='Create volume from snapshot at the host '
'where snapshot resides')
volume_same_az_opt = cfg.BoolOpt('cloned_volume_same_az',
default=True,
help='Ensure that the new volumes are the '
'same AZ as snapshot or source volume')
az_cache_time_opt = cfg.IntOpt('az_cache_duration',
default=3600,
help='Cache volume availability zones in '
'memory for the provided duration in '
'seconds')
CONF = cfg.CONF
CONF.register_opt(volume_host_opt)
CONF.register_opt(volume_same_az_opt)
CONF.register_opt(az_cache_time_opt)
CONF.import_opt('glance_core_properties', 'cinder.image.glance')
CONF.import_opt('storage_availability_zone', 'cinder.volume.manager')
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
def wrap_check_policy(func):
"""Check policy corresponding to the wrapped methods prior to execution
This decorator requires the first 3 args of the wrapped function
to be (self, context, volume)
"""
@functools.wraps(func)
def wrapped(self, context, target_obj, *args, **kwargs):
check_policy(context, func.__name__, target_obj)
return func(self, context, target_obj, *args, **kwargs)
return wrapped
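# For illustration only: a method wrapped by wrap_check_policy is expected to
# be shaped like this hypothetical sketch, so that `volume` is available as
# the policy target when `wrapped` runs:
#
#     @wrap_check_policy
#     def example_action(self, context, volume, *args, **kwargs):
#         ...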
def check_policy(context, action, target_obj=None):
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
target.update(target_obj or {})
_action = 'volume:%s' % action
cinder.policy.enforce(context, _action, target)
class API(base.Base):
"""API for interacting with the volume manager."""
def __init__(self, db_driver=None, image_service=None):
self.image_service = (image_service or
glance.get_default_image_service())
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self.volume_rpcapi = volume_rpcapi.VolumeAPI()
self.availability_zones = []
self.availability_zones_last_fetched = None
self.key_manager = keymgr.API()
super(API, self).__init__(db_driver)
def list_availability_zones(self, enable_cache=False):
"""Describe the known availability zones
:retval list of dicts, each with a 'name' and 'available' key
"""
refresh_cache = False
if enable_cache:
if self.availability_zones_last_fetched is None:
refresh_cache = True
else:
cache_age = timeutils.delta_seconds(
self.availability_zones_last_fetched,
timeutils.utcnow())
if cache_age >= CONF.az_cache_duration:
refresh_cache = True
if refresh_cache or not enable_cache:
topic = CONF.volume_topic
ctxt = context.get_admin_context()
services = self.db.service_get_all_by_topic(ctxt, topic)
az_data = [(s['availability_zone'], s['disabled'])
for s in services]
disabled_map = {}
for (az_name, disabled) in az_data:
tracked_disabled = disabled_map.get(az_name, True)
disabled_map[az_name] = tracked_disabled and disabled
azs = [{'name': name, 'available': not disabled}
for (name, disabled) in disabled_map.items()]
if refresh_cache:
now = timeutils.utcnow()
self.availability_zones = azs
self.availability_zones_last_fetched = now
LOG.debug("Availability zone cache updated, next update will"
" occur around %s", now + datetime.timedelta(
seconds=CONF.az_cache_duration))
else:
azs = self.availability_zones
return tuple(azs)
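    # Illustrative return shape (zone names hypothetical): a tuple such as
    # ({'name': 'nova', 'available': True},), where 'available' is False only
    # when every service in that zone is disabled.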
def create(self, context, size, name, description, snapshot=None,
image_id=None, volume_type=None, metadata=None,
availability_zone=None, source_volume=None,
scheduler_hints=None, backup_source_volume=None,
               source_replica=None, consistencygroup=None, shareable=False):
# NOTE(jdg): we can have a create without size if we're
# doing a create from snap or volume. Currently
# the taskflow api will handle this and pull in the
# size from the source.
# NOTE(jdg): cinderclient sends in a string representation
        # of the size value. BUT there is a possibility that somebody
# could call the API directly so the is_int_like check
# handles both cases (string representation or true float or int).
if size and (not utils.is_int_like(size) or int(size) <= 0):
msg = _('Invalid volume size provided for create request '
'(size argument must be an integer (or string '
                    'representation of an integer) and greater '
'than zero).')
raise exception.InvalidInput(reason=msg)
if consistencygroup:
if not volume_type:
msg = _("volume_type must be provided when creating "
"a volume in a consistency group.")
raise exception.InvalidInput(reason=msg)
cg_voltypeids = consistencygroup.get('volume_type_id')
if volume_type.get('id') not in cg_voltypeids:
msg = _("Invalid volume_type provided (requested type "
"must be supported by this consistency group).")
raise exception.InvalidInput(reason=msg)
if source_volume and volume_type:
if volume_type['id'] != source_volume['volume_type_id']:
msg = _("Invalid volume_type provided (requested type "
"must match source volume, or be omitted). "
"You should omit the argument.")
raise exception.InvalidInput(reason=msg)
# When cloning replica (for testing), volume type must be omitted
if source_replica and volume_type:
msg = _("No volume_type should be provided when creating test "
"replica, type must be omitted.")
raise exception.InvalidInput(reason=msg)
if snapshot and volume_type:
if volume_type['id'] != snapshot['volume_type_id']:
msg = _("Invalid volume_type provided (requested type "
"must match source snapshot, or be omitted). "
"You should omit the argument.")
raise exception.InvalidInput(reason=msg)
# Determine the valid availability zones that the volume could be
# created in (a task in the flow will/can use this information to
# ensure that the availability zone requested is valid).
raw_zones = self.list_availability_zones(enable_cache=True)
availability_zones = set([az['name'] for az in raw_zones])
if CONF.storage_availability_zone:
availability_zones.add(CONF.storage_availability_zone)
create_what = {
'context': context,
'raw_size': size,
'name': name,
'description': description,
'snapshot': snapshot,
'image_id': image_id,
'raw_volume_type': volume_type,
'metadata': metadata,
'raw_availability_zone': availability_zone,
'source_volume': source_volume,
'scheduler_hints': scheduler_hints,
'key_manager': self.key_manager,
'backup_source_volume': backup_source_volume,
'source_replica': source_replica,
'optional_args': {'is_quota_committed': False},
'consistencygroup': consistencygroup,
'shareable': shareable,
}
try:
flow_engine = create_volume.get_flow(self.scheduler_rpcapi,
self.volume_rpcapi,
self.db,
self.image_service,
availability_zones,
create_what)
except Exception:
LOG.exception(_("Failed to create api volume flow"))
raise exception.CinderException(
_("Failed to create api volume flow"))
# Attaching this listener will capture all of the notifications that
# taskflow sends out and redirect them to a more useful log for
# cinders debugging (or error reporting) usage.
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
return flow_engine.storage.fetch('volume')
@wrap_check_policy
def delete(self, context, volume, force=False, unmanage_only=False):
if context.is_admin and context.project_id != volume['project_id']:
project_id = volume['project_id']
else:
project_id = context.project_id
volume_id = volume['id']
if not volume['host']:
volume_utils.notify_about_volume_usage(context,
volume, "delete.start")
# NOTE(vish): scheduling failed, so delete it
# Note(zhiteng): update volume quota reservation
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume['volume_type_id'])
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_("Failed to update quota for deleting volume"))
self.db.volume_destroy(context.elevated(), volume_id)
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
volume_utils.notify_about_volume_usage(context,
volume, "delete.end")
return
if not force and volume['status'] not in ["available", "error",
"error_restoring",
"error_extending"]:
msg = _("Volume status must be available or error, "
"but current status is: %s") % volume['status']
raise exception.InvalidVolume(reason=msg)
if volume['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_id)
if volume['migration_status'] is not None:
# Volume is migrating, wait until done
msg = _("Volume cannot be deleted while migrating")
raise exception.InvalidVolume(reason=msg)
snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
if len(snapshots):
msg = _("Volume still has %d dependent snapshots") % len(snapshots)
raise exception.InvalidVolume(reason=msg)
# If the volume is encrypted, delete its encryption key from the key
# manager. This operation makes volume deletion an irreversible process
# because the volume cannot be decrypted without its key.
encryption_key_id = volume.get('encryption_key_id', None)
if encryption_key_id is not None:
self.key_manager.delete_key(context, encryption_key_id)
now = timeutils.utcnow()
self.db.volume_update(context, volume_id, {'status': 'deleting',
'terminated_at': now})
self.volume_rpcapi.delete_volume(context, volume, unmanage_only)
@wrap_check_policy
def update(self, context, volume, fields):
self.db.volume_update(context, volume['id'], fields)
def get(self, context, volume_id, viewable_admin_meta=False):
old_ctxt = context.deepcopy()
if viewable_admin_meta:
ctxt = context.elevated()
else:
ctxt = context
rv = self.db.volume_get(ctxt, volume_id)
volume = dict(rv.iteritems())
try:
check_policy(old_ctxt, 'get', volume)
except exception.PolicyNotAuthorized:
# raise VolumeNotFound instead to make sure Cinder behaves
# as it used to
raise exception.VolumeNotFound(volume_id=volume_id)
return volume
def _get_all_tenants_value(self, filters):
"""Returns a Boolean for the value of filters['all_tenants'].
False is returned if 'all_tenants' is not in the filters dictionary.
An InvalidInput exception is thrown for invalid values.
"""
b = False
if 'all_tenants' in filters:
val = six.text_type(filters['all_tenants']).lower()
if val in ['true', '1']:
b = True
elif val in ['false', '0']:
b = False
else:
msg = _('all_tenants param must be 0 or 1')
raise exception.InvalidInput(reason=msg)
return b
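    # Hypothetical inputs for illustration: {'all_tenants': '1'} and
    # {'all_tenants': 'true'} yield True, {'all_tenants': '0'} yields False,
    # a missing key defaults to False, and any other value raises
    # InvalidInput.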
def get_all(self, context, marker=None, limit=None, sort_key='created_at',
sort_dir='desc', filters=None, viewable_admin_meta=False):
check_policy(context, 'get_all')
if filters is None:
filters = {}
allTenants = self._get_all_tenants_value(filters)
try:
if limit is not None:
limit = int(limit)
if limit < 0:
msg = _('limit param must be positive')
raise exception.InvalidInput(reason=msg)
except ValueError:
msg = _('limit param must be an integer')
raise exception.InvalidInput(reason=msg)
# Non-admin shouldn't see temporary target of a volume migration, add
# unique filter data to reflect that only volumes with a NULL
# 'migration_status' or a 'migration_status' that does not start with
# 'target:' should be returned (processed in db/sqlalchemy/api.py)
if not context.is_admin:
filters['no_migration_targets'] = True
if filters:
LOG.debug("Searching by: %s" % six.text_type(filters))
if context.is_admin and allTenants:
# Need to remove all_tenants to pass the filtering below.
del filters['all_tenants']
volumes = self.db.volume_get_all(context, marker, limit, sort_key,
sort_dir, filters=filters)
else:
if viewable_admin_meta:
context = context.elevated()
volumes = self.db.volume_get_all_by_project(context,
context.project_id,
marker, limit,
sort_key, sort_dir,
filters=filters)
return volumes
def get_snapshot(self, context, snapshot_id):
check_policy(context, 'get_snapshot')
rv = self.db.snapshot_get(context, snapshot_id)
return dict(rv.iteritems())
def get_volume(self, context, volume_id):
check_policy(context, 'get_volume')
rv = self.db.volume_get(context, volume_id)
return dict(rv.iteritems())
def get_all_snapshots(self, context, search_opts=None):
check_policy(context, 'get_all_snapshots')
search_opts = search_opts or {}
if (context.is_admin and 'all_tenants' in search_opts):
# Need to remove all_tenants to pass the filtering below.
del search_opts['all_tenants']
snapshots = self.db.snapshot_get_all(context)
else:
snapshots = self.db.snapshot_get_all_by_project(
context, context.project_id)
if search_opts:
LOG.debug("Searching by: %s" % search_opts)
results = []
not_found = object()
for snapshot in snapshots:
for opt, value in search_opts.iteritems():
if snapshot.get(opt, not_found) != value:
break
else:
results.append(snapshot)
snapshots = results
return snapshots
@wrap_check_policy
def reserve_volume(self, context, volume):
#NOTE(jdg): check for Race condition bug 1096983
#explicitly get updated ref and check
volume = self.db.volume_get(context, volume['id'])
if volume['status'] == 'available':
self.update(context, volume, {"status": "attaching"})
elif volume['status'] == 'in-use':
if volume['shareable']:
self.update(context, volume, {"status": "attaching"})
else:
                msg = _("Volume must be shareable to be reserved while in-use.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
else:
msg = _("Volume status must be available to reserve")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
@wrap_check_policy
def unreserve_volume(self, context, volume):
volume = self.db.volume_get(context, volume['id'])
if volume['status'] == 'attaching':
attaches = self.db.volume_attachment_get_used_by_volume_id(
context, volume['id'])
if attaches:
self.update(context, volume, {"status": "in-use"})
else:
self.update(context, volume, {"status": "available"})
@wrap_check_policy
def begin_detaching(self, context, volume):
# If we are in the middle of a volume migration, we don't want the user
# to see that the volume is 'detaching'. Having 'migration_status' set
# will have the same effect internally.
if volume['migration_status']:
return
if (volume['status'] != 'in-use' or
volume['attach_status'] != 'attached'):
msg = (_("Unable to detach volume. Volume status must be 'in-use' "
"and attach_status must be 'attached' to detach. "
"Currently: status: '%(status)s', "
"attach_status: '%(attach_status)s'") %
{'status': volume['status'],
'attach_status': volume['attach_status']})
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
self.update(context, volume, {"status": "detaching"})
@wrap_check_policy
def roll_detaching(self, context, volume):
if volume['status'] == "detaching":
self.update(context, volume, {"status": "in-use"})
@wrap_check_policy
def attach(self, context, volume, instance_uuid, host_name,
mountpoint, mode):
volume_metadata = self.get_volume_admin_metadata(context.elevated(),
volume)
if 'readonly' not in volume_metadata:
# NOTE(zhiyan): set a default value for read-only flag to metadata.
self.update_volume_admin_metadata(context.elevated(), volume,
{'readonly': 'False'})
volume_metadata['readonly'] = 'False'
if volume_metadata['readonly'] == 'True' and mode != 'ro':
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=volume['id'])
return self.volume_rpcapi.attach_volume(context,
volume,
instance_uuid,
host_name,
mountpoint,
mode)
@wrap_check_policy
def detach(self, context, volume, attachment_id):
return self.volume_rpcapi.detach_volume(context, volume,
attachment_id)
@wrap_check_policy
def initialize_connection(self, context, volume, connector):
return self.volume_rpcapi.initialize_connection(context,
volume,
connector)
@wrap_check_policy
def terminate_connection(self, context, volume, connector, force=False):
self.unreserve_volume(context, volume)
return self.volume_rpcapi.terminate_connection(context,
volume,
connector,
force)
@wrap_check_policy
def accept_transfer(self, context, volume, new_user, new_project):
return self.volume_rpcapi.accept_transfer(context,
volume,
new_user,
new_project)
def _create_snapshot(self, context,
volume, name, description,
force=False, metadata=None,
cgsnapshot_id=None):
snapshot = self.create_snapshot_in_db(
context, volume, name,
description, force, metadata, cgsnapshot_id)
self.volume_rpcapi.create_snapshot(context, volume, snapshot)
return snapshot
def create_snapshot_in_db(self, context,
volume, name, description,
force, metadata,
cgsnapshot_id):
check_policy(context, 'create_snapshot', volume)
if volume['migration_status'] is not None:
# Volume is migrating, wait until done
msg = _("Snapshot cannot be created while volume is migrating")
raise exception.InvalidVolume(reason=msg)
if volume['status'].startswith('replica_'):
# Can't snapshot secondary replica
msg = _("Snapshot of secondary replica is not allowed.")
raise exception.InvalidVolume(reason=msg)
if ((not force) and (volume['status'] != "available")):
msg = _("must be available")
raise exception.InvalidVolume(reason=msg)
        # A snapshot cannot be created from a shareable volume that is attached to multiple instances
if volume['shareable']:
attachments = self.db.volume_attachment_get_used_by_volume_id(
context, volume['id'])
if len(attachments) > 1:
                msg = _("Snapshot cannot be created from a shareable "
                        "volume attached to multiple instances.")
raise exception.InvalidVolume(reason=msg)
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': 1}
else:
reserve_opts = {'snapshots': 1, 'gigabytes': volume['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.get('volume_type_id'))
reservations = QUOTAS.reserve(context, **reserve_opts)
except exception.OverQuota as e:
overs = e.kwargs['overs']
usages = e.kwargs['usages']
quotas = e.kwargs['quotas']
def _consumed(name):
return (usages[name]['reserved'] + usages[name]['in_use'])
for over in overs:
if 'gigabytes' in over:
msg = _("Quota exceeded for %(s_pid)s, tried to create "
"%(s_size)sG snapshot (%(d_consumed)dG of "
"%(d_quota)dG already consumed)")
LOG.warn(msg % {'s_pid': context.project_id,
's_size': volume['size'],
'd_consumed': _consumed(over),
'd_quota': quotas[over]})
raise exception.VolumeSizeExceedsAvailableQuota(
requested=volume['size'],
consumed=_consumed('gigabytes'),
quota=quotas['gigabytes'])
elif 'snapshots' in over:
msg = _("Quota exceeded for %(s_pid)s, tried to create "
"snapshot (%(d_consumed)d snapshots "
"already consumed)")
LOG.warn(msg % {'s_pid': context.project_id,
'd_consumed': _consumed(over)})
raise exception.SnapshotLimitExceeded(
allowed=quotas[over])
self._check_metadata_properties(metadata)
options = {'volume_id': volume['id'],
'cgsnapshot_id': cgsnapshot_id,
'user_id': context.user_id,
'project_id': context.project_id,
'status': "creating",
'progress': '0%',
'volume_size': volume['size'],
'display_name': name,
'display_description': description,
'volume_type_id': volume['volume_type_id'],
'encryption_key_id': volume['encryption_key_id'],
'metadata': metadata}
snapshot = None
try:
snapshot = self.db.snapshot_create(context, options)
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
try:
if snapshot:
self.db.snapshot_destroy(context, snapshot['id'])
finally:
QUOTAS.rollback(context, reservations)
return snapshot
def create_snapshots_in_db(self, context,
volume_list,
name, description,
force, cgsnapshot_id):
snapshot_list = []
for volume in volume_list:
self._create_snapshot_in_db_validate(context, volume, force)
reservations = self._create_snapshots_in_db_reserve(
context, volume_list)
options_list = []
for volume in volume_list:
options = self._create_snapshot_in_db_options(
context, volume, name, description, cgsnapshot_id)
options_list.append(options)
try:
for options in options_list:
snapshot = self.db.snapshot_create(context, options)
snapshot_list.append(snapshot)
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
try:
for snap in snapshot_list:
self.db.snapshot_destroy(context, snap['id'])
finally:
QUOTAS.rollback(context, reservations)
return snapshot_list
def _create_snapshot_in_db_validate(self, context, volume, force):
check_policy(context, 'create_snapshot', volume)
if volume['migration_status'] is not None:
# Volume is migrating, wait until done
msg = _("Snapshot cannot be created while volume is migrating")
raise exception.InvalidVolume(reason=msg)
if ((not force) and (volume['status'] != "available")):
msg = _("Snapshot cannot be created because volume '%s' is not "
"available.") % volume['id']
raise exception.InvalidVolume(reason=msg)
def _create_snapshots_in_db_reserve(self, context, volume_list):
reserve_opts_list = []
total_reserve_opts = {}
try:
for volume in volume_list:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': 1}
else:
reserve_opts = {'snapshots': 1,
'gigabytes': volume['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.get('volume_type_id'))
reserve_opts_list.append(reserve_opts)
for reserve_opts in reserve_opts_list:
for (key, value) in reserve_opts.items():
if key not in total_reserve_opts.keys():
total_reserve_opts[key] = value
else:
total_reserve_opts[key] = \
total_reserve_opts[key] + value
reservations = QUOTAS.reserve(context, **total_reserve_opts)
except exception.OverQuota as e:
overs = e.kwargs['overs']
usages = e.kwargs['usages']
quotas = e.kwargs['quotas']
def _consumed(name):
return (usages[name]['reserved'] + usages[name]['in_use'])
for over in overs:
if 'gigabytes' in over:
msg = _("Quota exceeded for %(s_pid)s, tried to create "
"%(s_size)sG snapshot (%(d_consumed)dG of "
"%(d_quota)dG already consumed)")
LOG.warning(msg % {'s_pid': context.project_id,
's_size': volume['size'],
'd_consumed': _consumed(over),
'd_quota': quotas[over]})
raise exception.VolumeSizeExceedsAvailableQuota(
requested=volume['size'],
consumed=_consumed('gigabytes'),
quota=quotas['gigabytes'])
elif 'snapshots' in over:
msg = _("Quota exceeded for %(s_pid)s, tried to create "
"snapshot (%(d_consumed)d snapshots "
"already consumed)")
LOG.warning(msg % {'s_pid': context.project_id,
'd_consumed': _consumed(over)})
raise exception.SnapshotLimitExceeded(
allowed=quotas[over])
return reservations
def _create_snapshot_in_db_options(self, context, volume,
name, description,
cgsnapshot_id):
options = {'volume_id': volume['id'],
'cgsnapshot_id': cgsnapshot_id,
'user_id': context.user_id,
'project_id': context.project_id,
'status': "creating",
'progress': '0%',
'volume_size': volume['size'],
'display_name': name,
'display_description': description,
'volume_type_id': volume['volume_type_id'],
'encryption_key_id': volume['encryption_key_id']}
return options
def create_snapshot(self, context,
volume, name, description,
metadata=None, cgsnapshot_id=None):
return self._create_snapshot(context, volume, name, description,
False, metadata, cgsnapshot_id)
def create_snapshot_force(self, context,
volume, name,
description, metadata=None):
return self._create_snapshot(context, volume, name, description,
True, metadata)
@wrap_check_policy
def delete_snapshot(self, context, snapshot, force=False):
if not force and snapshot['status'] not in ["available", "error"]:
msg = _("Volume Snapshot status must be available or error")
raise exception.InvalidSnapshot(reason=msg)
cgsnapshot_id = snapshot.get('cgsnapshot_id', None)
if cgsnapshot_id:
msg = _("Snapshot %s is part of a cgsnapshot and has to be "
"deleted together with the cgsnapshot.") % snapshot['id']
LOG.error(msg)
raise exception.InvalidSnapshot(reason=msg)
self.db.snapshot_update(context, snapshot['id'],
{'status': 'deleting'})
volume = self.db.volume_get(context, snapshot['volume_id'])
self.volume_rpcapi.delete_snapshot(context, snapshot, volume['host'])
@wrap_check_policy
def update_snapshot(self, context, snapshot, fields):
self.db.snapshot_update(context, snapshot['id'], fields)
@wrap_check_policy
def get_volume_metadata(self, context, volume):
"""Get all metadata associated with a volume."""
rv = self.db.volume_metadata_get(context, volume['id'])
return dict(rv.iteritems())
@wrap_check_policy
def delete_volume_metadata(self, context, volume, key):
"""Delete the given metadata item from a volume."""
self.db.volume_metadata_delete(context, volume['id'], key)
try:
self.volume_rpcapi.delete_volume_metadata(context, volume, key)
except Exception:
pass
def _check_metadata_properties(self, metadata=None):
if not metadata:
metadata = {}
for k, v in metadata.iteritems():
if len(k) == 0:
msg = _("Metadata property key blank")
LOG.warn(msg)
raise exception.InvalidVolumeMetadata(reason=msg)
if len(k) > 255:
msg = _("Metadata property key greater than 255 characters")
LOG.warn(msg)
raise exception.InvalidVolumeMetadataSize(reason=msg)
if len(v) > 255:
msg = _("Metadata property value greater than 255 characters")
LOG.warn(msg)
raise exception.InvalidVolumeMetadataSize(reason=msg)
@wrap_check_policy
def update_volume_metadata(self, context, volume, metadata, delete=False):
"""Updates or creates volume metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
if delete:
_metadata = metadata
else:
orig_meta = self.get_volume_metadata(context, volume)
_metadata = orig_meta.copy()
_metadata.update(metadata)
self._check_metadata_properties(_metadata)
db_meta = self.db.volume_metadata_update(context, volume['id'],
_metadata, delete)
# TODO(jdg): Implement an RPC call for drivers that may use this info
try:
self.volume_rpcapi.update_volume_metadata(context, volume, _metadata, delete)
except Exception:
pass
return db_meta
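    # Sketch of the delete-flag semantics with hypothetical values: given
    # existing metadata {'a': '1', 'b': '2'}, calling with metadata={'b': '3'}
    # and delete=False stores {'a': '1', 'b': '3'}, while delete=True replaces
    # the whole set so only {'b': '3'} remains.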
def get_volume_metadata_value(self, volume, key):
"""Get value of particular metadata key."""
metadata = volume.get('volume_metadata')
if metadata:
for i in volume['volume_metadata']:
if i['key'] == key:
return i['value']
return None
@wrap_check_policy
def get_volume_admin_metadata(self, context, volume):
"""Get all administration metadata associated with a volume."""
rv = self.db.volume_admin_metadata_get(context, volume['id'])
return dict(rv.iteritems())
@wrap_check_policy
def delete_volume_admin_metadata(self, context, volume, key):
"""Delete the given administration metadata item from a volume."""
self.db.volume_admin_metadata_delete(context, volume['id'], key)
@wrap_check_policy
def update_volume_admin_metadata(self, context, volume, metadata,
delete=False):
"""Updates or creates volume administration metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
if delete:
_metadata = metadata
else:
orig_meta = self.get_volume_admin_metadata(context, volume)
_metadata = orig_meta.copy()
_metadata.update(metadata)
self._check_metadata_properties(_metadata)
self.db.volume_admin_metadata_update(context, volume['id'],
_metadata, delete)
# TODO(jdg): Implement an RPC call for drivers that may use this info
return _metadata
def get_snapshot_metadata(self, context, snapshot):
"""Get all metadata associated with a snapshot."""
rv = self.db.snapshot_metadata_get(context, snapshot['id'])
return dict(rv.iteritems())
def delete_snapshot_metadata(self, context, snapshot, key):
"""Delete the given metadata item from a snapshot."""
self.db.snapshot_metadata_delete(context, snapshot['id'], key)
def update_snapshot_metadata(self, context,
snapshot, metadata,
delete=False):
"""Updates or creates snapshot metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
if delete:
_metadata = metadata
else:
orig_meta = self.get_snapshot_metadata(context, snapshot)
_metadata = orig_meta.copy()
_metadata.update(metadata)
self._check_metadata_properties(_metadata)
db_meta = self.db.snapshot_metadata_update(context,
snapshot['id'],
_metadata,
True)
# TODO(jdg): Implement an RPC call for drivers that may use this info
return db_meta
def get_snapshot_metadata_value(self, snapshot, key):
pass
def get_volumes_image_metadata(self, context):
check_policy(context, 'get_volumes_image_metadata')
db_data = self.db.volume_glance_metadata_get_all(context)
results = collections.defaultdict(dict)
for meta_entry in db_data:
results[meta_entry['volume_id']].update({meta_entry['key']:
meta_entry['value']})
return results
@wrap_check_policy
def get_volume_image_metadata(self, context, volume):
db_data = self.db.volume_glance_metadata_get(context, volume['id'])
return dict(
(meta_entry.key, meta_entry.value) for meta_entry in db_data
)
def _check_volume_availability(self, volume, force):
"""Check if the volume can be used."""
if volume['status'] not in ['available', 'in-use']:
msg = _('Volume status must be available/in-use.')
raise exception.InvalidVolume(reason=msg)
if not force and 'in-use' == volume['status']:
msg = _('Volume status is in-use.')
raise exception.InvalidVolume(reason=msg)
@wrap_check_policy
def copy_volume_to_image(self, context, volume, metadata, force):
"""Create a new image from the specified volume."""
self._check_volume_availability(volume, force)
glance_core_properties = CONF.glance_core_properties
if glance_core_properties:
try:
volume_image_metadata = self.get_volume_image_metadata(context,
volume)
custom_property_set = (set(volume_image_metadata).difference
(set(glance_core_properties)))
if custom_property_set:
metadata.update(dict(properties=dict((custom_property,
volume_image_metadata
[custom_property])
for custom_property
in custom_property_set)))
except exception.GlanceMetadataNotFound:
# If volume is not created from image, No glance metadata
# would be available for that volume in
# volume glance metadata table
pass
# begin added by liuling
image_name = metadata["name"]
if not image_name.startswith('image@'):
recv_metadata = self.image_service.create(context, metadata)
elif image_name.startswith('image@') and '_' not in image_name:
image_id = image_name.split('@')[1].split('_')[0]
image_name = image_name.split('@')[1]
recv_metadata = self.image_service.show(context, image_id)
else:
image_id = image_name.split('@')[1].split('_')[0]
            metadata['id'] = image_id
metadata['name'] = image_name
recv_metadata = self.image_service.create(context, metadata)
self.update(context, volume, {'status': 'uploading'})
# end added by liuling
self.volume_rpcapi.copy_volume_to_image(context,
volume,
recv_metadata)
response = {"id": volume['id'],
"updated_at": volume['updated_at'],
"status": 'uploading',
"display_description": volume['display_description'],
"size": volume['size'],
"volume_type": volume['volume_type'],
"image_id": recv_metadata['id'],
"container_format": recv_metadata['container_format'],
"disk_format": recv_metadata['disk_format'],
"image_name": recv_metadata.get('name', None)}
return response
@wrap_check_policy
def extend(self, context, volume, new_size):
if volume['status'] != 'available':
msg = _('Volume status must be available to extend.')
raise exception.InvalidVolume(reason=msg)
size_increase = (int(new_size)) - volume['size']
if size_increase <= 0:
msg = (_("New size for extend must be greater "
"than current size. (current: %(size)s, "
"extended: %(new_size)s)") % {'new_size': new_size,
'size': volume['size']})
raise exception.InvalidInput(reason=msg)
try:
reservations = QUOTAS.reserve(context, gigabytes=+size_increase)
except exception.OverQuota as exc:
usages = exc.kwargs['usages']
quotas = exc.kwargs['quotas']
def _consumed(name):
return (usages[name]['reserved'] + usages[name]['in_use'])
msg = _("Quota exceeded for %(s_pid)s, tried to extend volume by "
"%(s_size)sG, (%(d_consumed)dG of %(d_quota)dG already "
"consumed).")
LOG.error(msg % {'s_pid': context.project_id,
's_size': size_increase,
'd_consumed': _consumed('gigabytes'),
'd_quota': quotas['gigabytes']})
raise exception.VolumeSizeExceedsAvailableQuota(
requested=size_increase,
consumed=_consumed('gigabytes'),
quota=quotas['gigabytes'])
self.update(context, volume, {'status': 'extending'})
self.volume_rpcapi.extend_volume(context, volume, new_size,
reservations)
@wrap_check_policy
def migrate_volume(self, context, volume, host, force_host_copy):
"""Migrate the volume to the specified host."""
# We only handle "available" volumes for now
if volume['status'] not in ['available', 'in-use']:
msg = _('Volume status must be available/in-use.')
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
if volume['status'] in ['in-use'] and volume['shareable']:
            msg = _('Only available shareable volumes can be migrated')
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Make sure volume is not part of a migration
if volume['migration_status'] is not None:
msg = _("Volume is already part of an active migration")
raise exception.InvalidVolume(reason=msg)
# We only handle volumes without snapshots for now
snaps = self.db.snapshot_get_all_for_volume(context, volume['id'])
if snaps:
msg = _("volume must not have snapshots")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# We only handle non-replicated volumes for now
rep_status = volume['replication_status']
if rep_status is not None and rep_status != 'disabled':
msg = _("Volume must not be replicated.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
cg_id = volume.get('consistencygroup_id', None)
if cg_id:
msg = _("Volume must not be part of a consistency group.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Make sure the host is in the list of available hosts
elevated = context.elevated()
topic = CONF.volume_topic
services = self.db.service_get_all_by_topic(elevated,
topic,
disabled=False)
        svc_host = volume_utils.extract_host(host, 'backend')
        found = False
        for service in services:
            if utils.service_is_up(service) and service['host'] == svc_host:
                found = True
                break
if not found:
msg = (_('No available service named %s') % host)
LOG.error(msg)
raise exception.InvalidHost(reason=msg)
# Make sure the destination host is different than the current one
if host == volume['host']:
msg = _('Destination host must be different than current host')
LOG.error(msg)
raise exception.InvalidHost(reason=msg)
self.update(context, volume, {'migration_status': 'starting'})
# Call the scheduler to ensure that the host exists and that it can
# accept the volume
volume_type = {}
volume_type_id = volume['volume_type_id']
if volume_type_id:
volume_type = volume_types.get_volume_type(context, volume_type_id)
request_spec = {'volume_properties': volume,
'volume_type': volume_type,
'volume_id': volume['id']}
self.scheduler_rpcapi.migrate_volume_to_host(context,
CONF.volume_topic,
volume['id'],
host,
force_host_copy,
request_spec)
@wrap_check_policy
def migrate_volume_completion(self, context, volume, new_volume, error):
# This is a volume swap initiated by Nova, not Cinder. Nova expects
# us to return the new_volume_id.
if not (volume['migration_status'] or new_volume['migration_status']):
return new_volume['id']
if not volume['migration_status']:
msg = _('Source volume not mid-migration.')
raise exception.InvalidVolume(reason=msg)
if not new_volume['migration_status']:
msg = _('Destination volume not mid-migration.')
raise exception.InvalidVolume(reason=msg)
expected_status = 'target:%s' % volume['id']
        if new_volume['migration_status'] != expected_status:
msg = (_('Destination has migration_status %(stat)s, expected '
'%(exp)s.') % {'stat': new_volume['migration_status'],
'exp': expected_status})
raise exception.InvalidVolume(reason=msg)
return self.volume_rpcapi.migrate_volume_completion(context, volume,
new_volume, error)
@wrap_check_policy
def update_readonly_flag(self, context, volume, flag):
if volume['status'] != 'available':
msg = _('Volume status must be available to update readonly flag.')
raise exception.InvalidVolume(reason=msg)
self.update_volume_admin_metadata(context.elevated(), volume,
{'readonly': six.text_type(flag)})
@wrap_check_policy
def retype(self, context, volume, new_type, migration_policy=None):
"""Attempt to modify the type associated with an existing volume."""
if volume['status'] not in ['available', 'in-use']:
msg = _('Unable to update type due to incorrect status '
'on volume: %s') % volume['id']
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
if volume['migration_status'] is not None:
msg = (_("Volume %s is already part of an active migration.")
% volume['id'])
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
if migration_policy and migration_policy not in ['on-demand', 'never']:
            msg = _('migration_policy must be \'on-demand\' or \'never\', '
                    'passed: %s') % migration_policy
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
cg_id = volume.get('consistencygroup_id', None)
if cg_id:
msg = _("Volume must not be part of a consistency group.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Support specifying volume type by ID or name
try:
if uuidutils.is_uuid_like(new_type):
vol_type = volume_types.get_volume_type(context, new_type)
else:
vol_type = volume_types.get_volume_type_by_name(context,
new_type)
except exception.InvalidVolumeType:
msg = _('Invalid volume_type passed: %s') % new_type
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
vol_type_id = vol_type['id']
vol_type_qos_id = vol_type['qos_specs_id']
old_vol_type = None
old_vol_type_id = volume['volume_type_id']
old_vol_type_qos_id = None
# Error if the original and new type are the same
if volume['volume_type_id'] == vol_type_id:
msg = (_('New volume_type same as original: %s') % new_type)
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
if volume['volume_type_id']:
old_vol_type = volume_types.get_volume_type(
context, old_vol_type_id)
old_vol_type_qos_id = old_vol_type['qos_specs_id']
# We don't support changing encryption requirements yet
old_enc = volume_types.get_volume_type_encryption(context,
old_vol_type_id)
new_enc = volume_types.get_volume_type_encryption(context,
vol_type_id)
if old_enc != new_enc:
msg = _('Retype cannot change encryption requirements')
raise exception.InvalidInput(reason=msg)
# We don't support changing QoS at the front-end yet for in-use volumes
# TODO(avishay): Call Nova to change QoS setting (libvirt has support
# - virDomainSetBlockIoTune() - Nova does not have support yet).
if (volume['status'] != 'available' and
old_vol_type_qos_id != vol_type_qos_id):
for qos_id in [old_vol_type_qos_id, vol_type_qos_id]:
if qos_id:
specs = qos_specs.get_qos_specs(context.elevated(), qos_id)
if specs['qos_specs']['consumer'] != 'back-end':
msg = _('Retype cannot change front-end qos specs for '
'in-use volumes')
raise exception.InvalidInput(reason=msg)
        # We're checking here so that we can report any quota issues as
# early as possible, but won't commit until we change the type. We
# pass the reservations onward in case we need to roll back.
reservations = quota_utils.get_volume_type_reservation(context, volume,
vol_type_id)
self.update(context, volume, {'status': 'retyping'})
request_spec = {'volume_properties': volume,
'volume_id': volume['id'],
'volume_type': vol_type,
'migration_policy': migration_policy,
'quota_reservations': reservations}
self.scheduler_rpcapi.retype(context, CONF.volume_topic, volume['id'],
request_spec=request_spec,
filter_properties={})
def manage_existing(self, context, host, ref, name=None, description=None,
volume_type=None, metadata=None,
availability_zone=None, bootable=False):
if availability_zone is None:
elevated = context.elevated()
try:
svc_host = volume_utils.extract_host(host, 'backend')
service = self.db.service_get_by_host_and_topic(
elevated, svc_host, CONF.volume_topic)
except exception.ServiceNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_('Unable to find service for given host.'))
availability_zone = service.get('availability_zone')
volume_type_id = volume_type['id'] if volume_type else None
volume_properties = {
'size': 0,
'user_id': context.user_id,
'project_id': context.project_id,
'status': 'creating',
'attach_status': 'detached',
# Rename these to the internal name.
'display_description': description,
'display_name': name,
'host': host,
'availability_zone': availability_zone,
'volume_type_id': volume_type_id,
'metadata': metadata,
'bootable': bootable
}
# Call the scheduler to ensure that the host exists and that it can
# accept the volume
volume = self.db.volume_create(context, volume_properties)
request_spec = {'volume_properties': volume,
'volume_type': volume_type,
'volume_id': volume['id'],
'ref': ref}
self.scheduler_rpcapi.manage_existing(context, CONF.volume_topic,
volume['id'],
request_spec=request_spec)
return volume
class HostAPI(base.Base):
    """Sub-set of the Volume Manager API for managing host operations."""
    def __init__(self):
        super(HostAPI, self).__init__()
def set_host_enabled(self, context, host, enabled):
"""Sets the specified host's ability to accept new volumes."""
raise NotImplementedError()
def get_host_uptime(self, context, host):
"""Returns the result of calling "uptime" on the target host."""
raise NotImplementedError()
def host_power_action(self, context, host, action):
raise NotImplementedError()
def set_host_maintenance(self, context, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
volume evacuation.
"""
raise NotImplementedError()
| gpl-2.0 |
TeamCohen/TensorLog | tensorlog/debug.py | 1 | 8193 | # (C) William W. Cohen and Carnegie Mellon University, 2016
#
# support for debugging/visualization
#
import sys
import tkinter as TK
import tkinter.ttk
import tkinter.font
import time
from tensorlog import comline
from tensorlog import config
from tensorlog import dataset
from tensorlog import declare
from tensorlog import learn
from tensorlog import matrixdb
from tensorlog import mutil
from tensorlog import opfunutil
conf = config.Config()
conf.sortByValue = True; conf.help.sortByValue = "In displaying message values, sort entries by weight if true, by name if false."
conf.fontsize = None; conf.help.fontsize = "Size of font, eg 14"
conf.fontweight = None; conf.help.fontsize = "Weight of font, eg 'bold'"
class Debugger(object):
def __init__(self,initProgram,targetPred,trainData,gradient=False):
self.rendered = False
self.sortByValue = conf.sortByValue
self.prog = initProgram
self.trainData = trainData
self.targetPred = targetPred
#evaluate the function so the outputs are cached
    assert self.targetPred,'must specify targetPred'
self.mode = declare.asMode(self.targetPred)
assert self.trainData.hasMode(self.mode),"No mode '%s' in trainData" % self.mode
self.X = self.trainData.getX(self.mode)
self.Y = self.trainData.getY(self.mode)
self.fun = self.prog.getPredictFunction(self.mode)
self.pad = opfunutil.Scratchpad()
self.P = self.fun.eval(self.prog.db, [self.X], self.pad)
# find the symbols that correspond to the inputs
dd = self.prog.db.matrixAsSymbolDict(self.X)
self.xSymbols = [list(d.keys())[0] for d in list(dd.values())]
# evaluate the gradient so that's cached
if gradient:
learner = learn.OnePredFixedRateGDLearner(self.prog, tracer=learn.Tracer.silent)
self.grad = learner.crossEntropyGrad(self.mode, self.X, self.Y, pad=self.pad)
else:
self.grad = None
def render(self):
#set up a window
self.root = TK.Tk()
default_font = tkinter.font.nametofont("TkDefaultFont")
if conf.fontsize:
default_font.configure(size=conf.fontsize)
if conf.fontweight:
default_font.configure(weight=conf.fontweight)
self.root.option_add("*Font", default_font)
#labels on the top
self.treeLabel = tkinter.ttk.Label(self.root,text="Listing of %s" % str(self.mode))
self.treeLabel.grid(row=0,column=1,sticky=TK.EW)
self.msgLabel = tkinter.ttk.Label(self.root,text="Details")
self.msgLabel.grid(row=0,column=2,sticky=TK.EW)
#put a scrollbars on the left and right
#these don't work now? maybe they worked with pack?
# self.scrollbarL = ttk.Scrollbar(self.root)
# self.scrollbarL.grid(row=1,column=0)
# self.scrollbarR = ttk.Scrollbar(self.root)
# self.scrollbarR.grid(row=1,column=4)
#set up a treeview widget and tie it to the left scrollbar
self.tree = tkinter.ttk.Treeview(self.root)
self.tree.grid(row=1,column=1,sticky=TK.NSEW)
# self.tree.config(yscrollcommand=self.scrollbarL.set)
# self.scrollbarL.config(command=self.tree.yview)
#adjust the columns
self.tree["columns"]=("comment","output","delta")
self.tree.column("#0", width=300 )
self.tree.column("comment", width=300 )
self.tree.column("output", width=150)
self.tree.column("delta", width=150)
self.tree.heading("comment", text="comment")
self.tree.heading("output", text="output")
self.tree.heading("delta", text="delta")
# save the function/op deltas and outputs for each tree node,
# indexed by the tree id
self.treeOutputs = {}
self.treeDeltas = {}
#fill the tree with the function and its children
self.populateTree([self.fun],"")
# set up another treeview to display the function output/deltas,
# which will be triggered when you doubleclick
self.msg = tkinter.ttk.Treeview(self.root,height=30)
self.msg["columns"] = ("weight")
self.msg.heading("weight", text="weight")
self.msg.grid(row=1,column=2)
self.msgItems = set()
#tree will fill the msg window on doubleclick
self.tree.bind("<Button-1>", self.DisplayMsg)
# tie it to the right scrollbar
# self.tree.config(yscrollcommand=self.scrollbarR.set)
# self.scrollbarR.config(command=self.msg.yview)
def DisplayMsg(self,event):
"""display the message sent by with an op
or the output for a function."""
key = self.tree.identify_row(event.y)
# figure out where we clicked - returns #0, #1, ...
colStr = self.tree.identify_column(event.x)
colNum = int(colStr[1:])
tag = self.tree.item(key,option='text')
if colNum>=3:
m = self.treeDeltas[key]
      if m is None:
self.msgLabel.config(text='Delta for %s unavailable' % tag)
else:
self.msgLabel.config(text='Delta for %s' % tag)
else:
self.msgLabel.config(text='Output for %s' % tag)
m = self.treeOutputs[key]
for it in self.msgItems:
self.msg.delete(it)
self.msgItems = set()
    if m is not None:
dOfD = self.prog.db.matrixAsSymbolDict(m)
rowVector = len(list(dOfD.keys()))==1
for r in sorted(dOfD.keys()):
rowName = "Row Vector:" if rowVector else self.xSymbols[r]
rowChild = self.msg.insert("",r,text=rowName,open=True)
self.msgItems.add(rowChild)
        def sortKey(k):
          if self.sortByValue:
            return -dOfD[r][k]
          else:
            return k
for offset,sym in enumerate(sorted(list(dOfD[r].keys()), key=sortKey)):
#why are some of these None?
          if sym is not None:
w = dOfD[r][sym]
child = self.msg.insert(rowChild,offset,text=sym,values=("%.5f" % w),open=True)
def populateTree(self,funs,parent):
for offset,fun in enumerate(funs):
description = fun.pprintSummary()
comment = fun.pprintComment()
key = "iid%d" % len(list(self.treeOutputs.keys()))
funOutput = self.pad[fun.id].output
if self.grad:
#todo: clean up
if 'delta' in self.pad[fun.id].__dict__:
funDelta = self.pad[fun.id].delta
else:
funDelta = None
else:
funDelta = None
child = self.tree.insert(
parent,offset,iid=key,text=description,
values=(comment,mutil.pprintSummary(funOutput),mutil.pprintSummary(funDelta)),open=True)
self.treeOutputs[key] = funOutput
self.treeDeltas[key] = funDelta
self.populateTree(fun.children(), child)
def mainloop(self):
if not self.rendered:
self.render()
self.root.mainloop()
if __name__ == "__main__":
def usage():
print('debug.py [usual tensorlog options] mode [inputs]')
optdict,args = comline.parseCommandLine(sys.argv[1:])
dset = optdict.get('trainData') or optdict.get('testData')
if dset==None and len(args)<2:
usage()
print('debug on what input? specify --trainData or give a function input')
elif len(args)<1:
usage()
elif dset and len(args)>2:
    print('using --trainData, not the function input given')
elif dset:
mode = declare.asMode(args[0])
Debugger(optdict['prog'],mode,dset,gradient=True).mainloop()
else:
mode = declare.asMode(args[0])
    assert optdict['prog'].db.isTypeless(),'cannot debug a database with declared types'
X = optdict['prog'].db.onehot(args[1])
dset = dataset.Dataset({mode:X},{mode:optdict['prog'].db.zeros()})
Debugger(optdict['prog'],mode,dset,gradient=False).mainloop()
| apache-2.0 |
Blizzard/heroprotocol | heroprotocol/versions/protocol65751.py | 10 | 26622 | #!/usr/bin/env python
#
# Copyright 2015-2021 Blizzard Entertainment. Subject to the MIT license.
# See the included LICENSE file for more information.
#
import six
from heroprotocol.decoders import CorruptedError, BitPackedBuffer, BitPackedDecoder, VersionedDecoder
# Decoding instructions for each protocol type.
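# Each entry is a (decoder_name, decoder_args) pair: e.g. ('_int', [(0, 7)])
# decodes a 7-bit unsigned integer with offset 0, and ('_struct', [[...]])
# lists (member_name, typeid, tag) triples. This reading of the layout is
# inferred from the decoders module, not from separate documentation.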
typeinfos = [
('_int',[(0,7)]), #0
('_int',[(0,4)]), #1
('_int',[(0,5)]), #2
('_int',[(0,6)]), #3
('_int',[(0,14)]), #4
('_int',[(0,22)]), #5
('_int',[(0,32)]), #6
('_choice',[(0,2),{0:('m_uint6',3),1:('m_uint14',4),2:('m_uint22',5),3:('m_uint32',6)}]), #7
('_struct',[[('m_userId',2,-1)]]), #8
('_blob',[(0,8)]), #9
('_int',[(0,8)]), #10
('_struct',[[('m_flags',10,0),('m_major',10,1),('m_minor',10,2),('m_revision',10,3),('m_build',6,4),('m_baseBuild',6,5)]]), #11
('_int',[(0,3)]), #12
('_bool',[]), #13
('_array',[(16,0),10]), #14
('_optional',[14]), #15
('_blob',[(16,0)]), #16
('_struct',[[('m_dataDeprecated',15,0),('m_data',16,1)]]), #17
('_struct',[[('m_signature',9,0),('m_version',11,1),('m_type',12,2),('m_elapsedGameLoops',6,3),('m_useScaledTime',13,4),('m_ngdpRootKey',17,5),('m_dataBuildNum',6,6),('m_replayCompatibilityHash',17,7)]]), #18
('_fourcc',[]), #19
('_blob',[(0,7)]), #20
('_int',[(0,64)]), #21
('_struct',[[('m_region',10,0),('m_programId',19,1),('m_realm',6,2),('m_name',20,3),('m_id',21,4)]]), #22
('_struct',[[('m_a',10,0),('m_r',10,1),('m_g',10,2),('m_b',10,3)]]), #23
('_int',[(0,2)]), #24
('_optional',[10]), #25
('_struct',[[('m_name',9,0),('m_toon',22,1),('m_race',9,2),('m_color',23,3),('m_control',10,4),('m_teamId',1,5),('m_handicap',0,6),('m_observe',24,7),('m_result',24,8),('m_workingSetSlotId',25,9),('m_hero',9,10)]]), #26
('_array',[(0,5),26]), #27
('_optional',[27]), #28
('_blob',[(0,10)]), #29
('_blob',[(0,11)]), #30
('_struct',[[('m_file',30,0)]]), #31
('_optional',[13]), #32
('_int',[(-9223372036854775808,64)]), #33
('_blob',[(0,12)]), #34
('_blob',[(40,0)]), #35
('_array',[(0,6),35]), #36
('_optional',[36]), #37
('_array',[(0,6),30]), #38
('_optional',[38]), #39
('_struct',[[('m_playerList',28,0),('m_title',29,1),('m_difficulty',9,2),('m_thumbnail',31,3),('m_isBlizzardMap',13,4),('m_restartAsTransitionMap',32,16),('m_timeUTC',33,5),('m_timeLocalOffset',33,6),('m_description',34,7),('m_imageFilePath',30,8),('m_campaignIndex',10,15),('m_mapFileName',30,9),('m_cacheHandles',37,10),('m_miniSave',13,11),('m_gameSpeed',12,12),('m_defaultDifficulty',3,13),('m_modPaths',39,14)]]), #40
('_optional',[9]), #41
('_optional',[35]), #42
('_optional',[6]), #43
('_struct',[[('m_race',25,-1)]]), #44
('_struct',[[('m_team',25,-1)]]), #45
('_blob',[(0,9)]), #46
('_struct',[[('m_name',9,-20),('m_clanTag',41,-19),('m_clanLogo',42,-18),('m_highestLeague',25,-17),('m_combinedRaceLevels',43,-16),('m_randomSeed',6,-15),('m_racePreference',44,-14),('m_teamPreference',45,-13),('m_testMap',13,-12),('m_testAuto',13,-11),('m_examine',13,-10),('m_customInterface',13,-9),('m_testType',6,-8),('m_observe',24,-7),('m_hero',46,-6),('m_skin',46,-5),('m_mount',46,-4),('m_banner',46,-3),('m_spray',46,-2),('m_toonHandle',20,-1)]]), #47
('_array',[(0,5),47]), #48
('_struct',[[('m_lockTeams',13,-16),('m_teamsTogether',13,-15),('m_advancedSharedControl',13,-14),('m_randomRaces',13,-13),('m_battleNet',13,-12),('m_amm',13,-11),('m_competitive',13,-10),('m_practice',13,-9),('m_cooperative',13,-8),('m_noVictoryOrDefeat',13,-7),('m_heroDuplicatesAllowed',13,-6),('m_fog',24,-5),('m_observers',24,-4),('m_userDifficulty',24,-3),('m_clientDebugFlags',21,-2),('m_ammId',43,-1)]]), #49
('_int',[(1,4)]), #50
('_int',[(1,8)]), #51
('_bitarray',[(0,6)]), #52
('_bitarray',[(0,8)]), #53
('_bitarray',[(0,4)]), #54
('_bitarray',[(0,2)]), #55
('_bitarray',[(0,7)]), #56
('_struct',[[('m_allowedColors',52,-6),('m_allowedRaces',53,-5),('m_allowedDifficulty',52,-4),('m_allowedControls',54,-3),('m_allowedObserveTypes',55,-2),('m_allowedAIBuilds',56,-1)]]), #57
('_array',[(0,5),57]), #58
('_struct',[[('m_randomValue',6,-26),('m_gameCacheName',29,-25),('m_gameOptions',49,-24),('m_gameSpeed',12,-23),('m_gameType',12,-22),('m_maxUsers',2,-21),('m_maxObservers',2,-20),('m_maxPlayers',2,-19),('m_maxTeams',50,-18),('m_maxColors',3,-17),('m_maxRaces',51,-16),('m_maxControls',1,-15),('m_mapSizeX',10,-14),('m_mapSizeY',10,-13),('m_mapFileSyncChecksum',6,-12),('m_mapFileName',30,-11),('m_mapAuthorName',9,-10),('m_modFileSyncChecksum',6,-9),('m_slotDescriptions',58,-8),('m_defaultDifficulty',3,-7),('m_defaultAIBuild',0,-6),('m_cacheHandles',36,-5),('m_hasExtensionMod',13,-4),('m_isBlizzardMap',13,-3),('m_isPremadeFFA',13,-2),('m_isCoopMode',13,-1)]]), #59
('_optional',[1]), #60
('_optional',[2]), #61
('_struct',[[('m_color',61,-1)]]), #62
('_array',[(0,17),6]), #63
('_struct',[[('m_hero',19,-2),('m_tier',10,-1)]]), #64
('_array',[(0,10),64]), #65
('_struct',[[('m_control',10,-24),('m_userId',60,-23),('m_teamId',1,-22),('m_colorPref',62,-21),('m_racePref',44,-20),('m_difficulty',3,-19),('m_aiBuild',0,-18),('m_handicap',0,-17),('m_observe',24,-16),('m_logoIndex',6,-15),('m_hero',46,-14),('m_skin',46,-13),('m_mount',46,-12),('m_workingSetSlotId',25,-11),('m_rewards',63,-10),('m_toonHandle',20,-9),('m_tandemLeaderUserId',60,-8),('m_hasSilencePenalty',13,-7),('m_hasVoiceSilencePenalty',13,-6),('m_banner',46,-5),('m_spray',46,-4),('m_announcerPack',46,-3),('m_voiceLine',46,-2),('m_heroMasteryTiers',65,-1)]]), #66
('_array',[(0,5),66]), #67
('_struct',[[('m_phase',12,-11),('m_maxUsers',2,-10),('m_maxObservers',2,-9),('m_slots',67,-8),('m_randomSeed',6,-7),('m_hostUserId',60,-6),('m_isSinglePlayer',13,-5),('m_pickedMapTag',10,-4),('m_gameDuration',6,-3),('m_defaultDifficulty',3,-2),('m_defaultAIBuild',0,-1)]]), #68
('_struct',[[('m_userInitialData',48,-3),('m_gameDescription',59,-2),('m_lobbyState',68,-1)]]), #69
('_struct',[[('m_syncLobbyState',69,-1)]]), #70
('_struct',[[('m_name',20,-1)]]), #71
('_blob',[(0,6)]), #72
('_struct',[[('m_name',72,-1)]]), #73
('_struct',[[('m_name',72,-3),('m_type',6,-2),('m_data',20,-1)]]), #74
('_struct',[[('m_type',6,-3),('m_name',72,-2),('m_data',34,-1)]]), #75
('_array',[(0,5),10]), #76
('_struct',[[('m_signature',76,-2),('m_toonHandle',20,-1)]]), #77
('_struct',[[('m_gameFullyDownloaded',13,-14),('m_developmentCheatsEnabled',13,-13),('m_testCheatsEnabled',13,-12),('m_multiplayerCheatsEnabled',13,-11),('m_syncChecksummingEnabled',13,-10),('m_isMapToMapTransition',13,-9),('m_debugPauseEnabled',13,-8),('m_useGalaxyAsserts',13,-7),('m_platformMac',13,-6),('m_cameraFollow',13,-5),('m_baseBuildNum',6,-4),('m_buildNum',6,-3),('m_versionFlags',6,-2),('m_hotkeyProfile',46,-1)]]), #78
('_struct',[[]]), #79
('_int',[(0,16)]), #80
('_struct',[[('x',80,-2),('y',80,-1)]]), #81
('_struct',[[('m_which',12,-2),('m_target',81,-1)]]), #82
('_struct',[[('m_fileName',30,-5),('m_automatic',13,-4),('m_overwrite',13,-3),('m_name',9,-2),('m_description',29,-1)]]), #83
('_int',[(1,32)]), #84
('_struct',[[('m_sequence',84,-1)]]), #85
('_null',[]), #86
('_int',[(0,20)]), #87
('_int',[(-2147483648,32)]), #88
('_struct',[[('x',87,-3),('y',87,-2),('z',88,-1)]]), #89
('_struct',[[('m_targetUnitFlags',80,-7),('m_timer',10,-6),('m_tag',6,-5),('m_snapshotUnitLink',80,-4),('m_snapshotControlPlayerId',60,-3),('m_snapshotUpkeepPlayerId',60,-2),('m_snapshotPoint',89,-1)]]), #90
('_choice',[(0,2),{0:('None',86),1:('TargetPoint',89),2:('TargetUnit',90)}]), #91
('_struct',[[('m_target',91,-4),('m_time',88,-3),('m_verb',29,-2),('m_arguments',29,-1)]]), #92
('_struct',[[('m_data',92,-1)]]), #93
('_int',[(0,26)]), #94
('_struct',[[('m_abilLink',80,-3),('m_abilCmdIndex',2,-2),('m_abilCmdData',25,-1)]]), #95
('_optional',[95]), #96
('_choice',[(0,2),{0:('None',86),1:('TargetPoint',89),2:('TargetUnit',90),3:('Data',6)}]), #97
('_optional',[89]), #98
('_struct',[[('m_cmdFlags',94,-7),('m_abil',96,-6),('m_data',97,-5),('m_vector',98,-4),('m_sequence',84,-3),('m_otherUnit',43,-2),('m_unitGroup',43,-1)]]), #99
('_array',[(0,6),2]), #100
('_choice',[(0,2),{0:('None',86),1:('Mask',52),2:('OneIndices',100),3:('ZeroIndices',100)}]), #101
('_struct',[[('m_unitLink',80,-4),('m_subgroupPriority',10,-3),('m_intraSubgroupPriority',10,-2),('m_count',3,-1)]]), #102
('_array',[(0,6),102]), #103
('_array',[(0,6),6]), #104
('_struct',[[('m_subgroupIndex',2,-4),('m_removeMask',101,-3),('m_addSubgroups',103,-2),('m_addUnitTags',104,-1)]]), #105
('_struct',[[('m_controlGroupId',1,-2),('m_delta',105,-1)]]), #106
('_struct',[[('m_controlGroupIndex',1,-3),('m_controlGroupUpdate',12,-2),('m_mask',101,-1)]]), #107
('_struct',[[('m_count',3,-6),('m_subgroupCount',3,-5),('m_activeSubgroupIndex',2,-4),('m_unitTagsChecksum',6,-3),('m_subgroupIndicesChecksum',6,-2),('m_subgroupsChecksum',6,-1)]]), #108
('_struct',[[('m_controlGroupId',1,-2),('m_selectionSyncData',108,-1)]]), #109
('_struct',[[('m_chatMessage',29,-1)]]), #110
('_struct',[[('m_speed',12,-1)]]), #111
('_int',[(-128,8)]), #112
('_struct',[[('m_delta',112,-1)]]), #113
('_struct',[[('x',88,-2),('y',88,-1)]]), #114
('_struct',[[('m_point',114,-4),('m_unit',6,-3),('m_pingedMinimap',13,-2),('m_option',88,-1)]]), #115
('_struct',[[('m_verb',29,-2),('m_arguments',29,-1)]]), #116
('_struct',[[('m_alliance',6,-2),('m_control',6,-1)]]), #117
('_struct',[[('m_unitTag',6,-1)]]), #118
('_struct',[[('m_unitTag',6,-2),('m_flags',10,-1)]]), #119
('_struct',[[('m_conversationId',88,-2),('m_replyId',88,-1)]]), #120
('_optional',[20]), #121
('_struct',[[('m_gameUserId',1,-6),('m_observe',24,-5),('m_name',9,-4),('m_toonHandle',121,-3),('m_clanTag',41,-2),('m_clanLogo',42,-1)]]), #122
('_array',[(0,5),122]), #123
('_int',[(0,1)]), #124
('_struct',[[('m_userInfos',123,-2),('m_method',124,-1)]]), #125
('_struct',[[('m_button',80,-2),('m_metaKeyFlags',80,-1)]]), #126
('_choice',[(0,3),{0:('None',86),1:('Checked',13),2:('ValueChanged',6),3:('SelectionChanged',88),4:('TextChanged',30),5:('MouseEvent',126)}]), #127
('_struct',[[('m_controlId',88,-3),('m_eventType',88,-2),('m_eventData',127,-1)]]), #128
('_struct',[[('m_soundHash',6,-2),('m_length',6,-1)]]), #129
('_array',[(0,7),6]), #130
('_struct',[[('m_soundHash',130,-2),('m_length',130,-1)]]), #131
('_struct',[[('m_syncInfo',131,-1)]]), #132
('_struct',[[('m_queryId',80,-3),('m_lengthMs',6,-2),('m_finishGameLoop',6,-1)]]), #133
('_struct',[[('m_queryId',80,-2),('m_lengthMs',6,-1)]]), #134
('_struct',[[('m_animWaitQueryId',80,-1)]]), #135
('_struct',[[('m_sound',6,-1)]]), #136
('_struct',[[('m_transmissionId',88,-2),('m_thread',6,-1)]]), #137
('_struct',[[('m_transmissionId',88,-1)]]), #138
('_optional',[81]), #139
('_optional',[80]), #140
('_optional',[112]), #141
('_struct',[[('m_target',139,-6),('m_distance',140,-5),('m_pitch',140,-4),('m_yaw',140,-3),('m_reason',141,-2),('m_follow',13,-1)]]), #142
('_struct',[[('m_skipType',124,-1)]]), #143
('_int',[(0,11)]), #144
('_struct',[[('x',144,-2),('y',144,-1)]]), #145
('_struct',[[('m_button',6,-5),('m_down',13,-4),('m_posUI',145,-3),('m_posWorld',89,-2),('m_flags',112,-1)]]), #146
('_struct',[[('m_posUI',145,-3),('m_posWorld',89,-2),('m_flags',112,-1)]]), #147
('_struct',[[('m_achievementLink',80,-1)]]), #148
('_struct',[[('m_hotkey',6,-2),('m_down',13,-1)]]), #149
('_struct',[[('m_abilLink',80,-3),('m_abilCmdIndex',2,-2),('m_state',112,-1)]]), #150
('_struct',[[('m_soundtrack',6,-1)]]), #151
('_struct',[[('m_key',112,-2),('m_flags',112,-1)]]), #152
('_struct',[[('m_error',88,-2),('m_abil',96,-1)]]), #153
('_int',[(0,19)]), #154
('_struct',[[('m_decrementMs',154,-1)]]), #155
('_struct',[[('m_portraitId',88,-1)]]), #156
('_struct',[[('m_functionName',20,-1)]]), #157
('_struct',[[('m_result',88,-1)]]), #158
('_struct',[[('m_gameMenuItemIndex',88,-1)]]), #159
('_int',[(-32768,16)]), #160
('_struct',[[('m_wheelSpin',160,-2),('m_flags',112,-1)]]), #161
('_struct',[[('m_button',80,-1)]]), #162
('_struct',[[('m_cutsceneId',88,-2),('m_bookmarkName',20,-1)]]), #163
('_struct',[[('m_cutsceneId',88,-1)]]), #164
('_struct',[[('m_cutsceneId',88,-3),('m_conversationLine',20,-2),('m_altConversationLine',20,-1)]]), #165
('_struct',[[('m_cutsceneId',88,-2),('m_conversationLine',20,-1)]]), #166
('_struct',[[('m_leaveReason',2,-1)]]), #167
('_struct',[[('m_observe',24,-7),('m_name',9,-6),('m_toonHandle',121,-5),('m_clanTag',41,-4),('m_clanLogo',42,-3),('m_hijack',13,-2),('m_hijackCloneGameUserId',60,-1)]]), #168
('_optional',[84]), #169
('_struct',[[('m_state',24,-2),('m_sequence',169,-1)]]), #170
('_struct',[[('m_sequence',169,-2),('m_target',89,-1)]]), #171
('_struct',[[('m_sequence',169,-2),('m_target',90,-1)]]), #172
('_struct',[[('m_catalog',10,-4),('m_entry',80,-3),('m_field',9,-2),('m_value',9,-1)]]), #173
('_struct',[[('m_index',6,-1)]]), #174
('_struct',[[('m_shown',13,-1)]]), #175
('_struct',[[('m_recipient',12,-2),('m_string',30,-1)]]), #176
('_struct',[[('m_recipient',12,-2),('m_point',114,-1)]]), #177
('_struct',[[('m_progress',88,-1)]]), #178
('_struct',[[('m_status',24,-1)]]), #179
('_struct',[[('m_abilLink',80,-3),('m_abilCmdIndex',2,-2),('m_buttonLink',80,-1)]]), #180
('_struct',[[('m_behaviorLink',80,-2),('m_buttonLink',80,-1)]]), #181
('_choice',[(0,2),{0:('None',86),1:('Ability',180),2:('Behavior',181),3:('Vitals',160)}]), #182
('_struct',[[('m_announcement',182,-4),('m_announceLink',80,-3),('m_otherUnitTag',6,-2),('m_unitTag',6,-1)]]), #183
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_unitTypeName',29,2),('m_controlPlayerId',1,3),('m_upkeepPlayerId',1,4),('m_x',10,5),('m_y',10,6)]]), #184
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_x',10,2),('m_y',10,3)]]), #185
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_killerPlayerId',60,2),('m_x',10,3),('m_y',10,4),('m_killerUnitTagIndex',43,5),('m_killerUnitTagRecycle',43,6)]]), #186
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_controlPlayerId',1,2),('m_upkeepPlayerId',1,3)]]), #187
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_unitTypeName',29,2)]]), #188
('_struct',[[('m_playerId',1,0),('m_upgradeTypeName',29,1),('m_count',88,2)]]), #189
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1)]]), #190
('_array',[(0,10),88]), #191
('_struct',[[('m_firstUnitIndex',6,0),('m_items',191,1)]]), #192
('_struct',[[('m_playerId',1,0),('m_type',6,1),('m_userId',43,2),('m_slotId',43,3)]]), #193
('_struct',[[('m_key',29,0)]]), #194
('_struct',[[('__parent',194,0),('m_value',29,1)]]), #195
('_array',[(0,6),195]), #196
('_optional',[196]), #197
('_struct',[[('__parent',194,0),('m_value',88,1)]]), #198
('_array',[(0,6),198]), #199
('_optional',[199]), #200
('_struct',[[('m_eventName',29,0),('m_stringData',197,1),('m_intData',200,2),('m_fixedData',200,3)]]), #201
('_struct',[[('m_value',6,0),('m_time',6,1)]]), #202
('_array',[(0,6),202]), #203
('_array',[(0,5),203]), #204
('_struct',[[('m_name',29,0),('m_values',204,1)]]), #205
('_array',[(0,21),205]), #206
('_struct',[[('m_instanceList',206,0)]]), #207
('_struct',[[('m_hero',29,0),('m_controllingTeam',6,1)]]), #208
('_struct',[[('m_hero',29,0),('m_controllingPlayer',6,1)]]), #209
('_struct',[[('m_hero',29,0),('m_newControllingPlayer',6,1)]]), #210
]
# Map from protocol NNet.Game.*Event eventid to (typeid, name)
game_event_types = {
5: (79, 'NNet.Game.SUserFinishedLoadingSyncEvent'),
7: (78, 'NNet.Game.SUserOptionsEvent'),
9: (71, 'NNet.Game.SBankFileEvent'),
10: (73, 'NNet.Game.SBankSectionEvent'),
11: (74, 'NNet.Game.SBankKeyEvent'),
12: (75, 'NNet.Game.SBankValueEvent'),
13: (77, 'NNet.Game.SBankSignatureEvent'),
14: (82, 'NNet.Game.SCameraSaveEvent'),
21: (83, 'NNet.Game.SSaveGameEvent'),
22: (79, 'NNet.Game.SSaveGameDoneEvent'),
23: (79, 'NNet.Game.SLoadGameDoneEvent'),
25: (85, 'NNet.Game.SCommandManagerResetEvent'),
26: (93, 'NNet.Game.SGameCheatEvent'),
27: (99, 'NNet.Game.SCmdEvent'),
28: (106, 'NNet.Game.SSelectionDeltaEvent'),
29: (107, 'NNet.Game.SControlGroupUpdateEvent'),
30: (109, 'NNet.Game.SSelectionSyncCheckEvent'),
32: (110, 'NNet.Game.STriggerChatMessageEvent'),
34: (111, 'NNet.Game.SSetAbsoluteGameSpeedEvent'),
35: (113, 'NNet.Game.SAddAbsoluteGameSpeedEvent'),
36: (115, 'NNet.Game.STriggerPingEvent'),
37: (116, 'NNet.Game.SBroadcastCheatEvent'),
38: (117, 'NNet.Game.SAllianceEvent'),
39: (118, 'NNet.Game.SUnitClickEvent'),
40: (119, 'NNet.Game.SUnitHighlightEvent'),
41: (120, 'NNet.Game.STriggerReplySelectedEvent'),
43: (125, 'NNet.Game.SHijackReplayGameEvent'),
44: (79, 'NNet.Game.STriggerSkippedEvent'),
45: (129, 'NNet.Game.STriggerSoundLengthQueryEvent'),
46: (136, 'NNet.Game.STriggerSoundOffsetEvent'),
47: (137, 'NNet.Game.STriggerTransmissionOffsetEvent'),
48: (138, 'NNet.Game.STriggerTransmissionCompleteEvent'),
49: (142, 'NNet.Game.SCameraUpdateEvent'),
50: (79, 'NNet.Game.STriggerAbortMissionEvent'),
55: (128, 'NNet.Game.STriggerDialogControlEvent'),
56: (132, 'NNet.Game.STriggerSoundLengthSyncEvent'),
57: (143, 'NNet.Game.STriggerConversationSkippedEvent'),
58: (146, 'NNet.Game.STriggerMouseClickedEvent'),
59: (147, 'NNet.Game.STriggerMouseMovedEvent'),
60: (148, 'NNet.Game.SAchievementAwardedEvent'),
61: (149, 'NNet.Game.STriggerHotkeyPressedEvent'),
62: (150, 'NNet.Game.STriggerTargetModeUpdateEvent'),
64: (151, 'NNet.Game.STriggerSoundtrackDoneEvent'),
66: (152, 'NNet.Game.STriggerKeyPressedEvent'),
67: (157, 'NNet.Game.STriggerMovieFunctionEvent'),
76: (153, 'NNet.Game.STriggerCommandErrorEvent'),
86: (79, 'NNet.Game.STriggerMovieStartedEvent'),
87: (79, 'NNet.Game.STriggerMovieFinishedEvent'),
88: (155, 'NNet.Game.SDecrementGameTimeRemainingEvent'),
89: (156, 'NNet.Game.STriggerPortraitLoadedEvent'),
90: (158, 'NNet.Game.STriggerCustomDialogDismissedEvent'),
91: (159, 'NNet.Game.STriggerGameMenuItemSelectedEvent'),
92: (161, 'NNet.Game.STriggerMouseWheelEvent'),
95: (162, 'NNet.Game.STriggerButtonPressedEvent'),
96: (79, 'NNet.Game.STriggerGameCreditsFinishedEvent'),
97: (163, 'NNet.Game.STriggerCutsceneBookmarkFiredEvent'),
98: (164, 'NNet.Game.STriggerCutsceneEndSceneFiredEvent'),
99: (165, 'NNet.Game.STriggerCutsceneConversationLineEvent'),
100: (166, 'NNet.Game.STriggerCutsceneConversationLineMissingEvent'),
101: (167, 'NNet.Game.SGameUserLeaveEvent'),
102: (168, 'NNet.Game.SGameUserJoinEvent'),
103: (170, 'NNet.Game.SCommandManagerStateEvent'),
104: (171, 'NNet.Game.SCmdUpdateTargetPointEvent'),
105: (172, 'NNet.Game.SCmdUpdateTargetUnitEvent'),
106: (133, 'NNet.Game.STriggerAnimLengthQueryByNameEvent'),
107: (134, 'NNet.Game.STriggerAnimLengthQueryByPropsEvent'),
108: (135, 'NNet.Game.STriggerAnimOffsetEvent'),
109: (173, 'NNet.Game.SCatalogModifyEvent'),
110: (174, 'NNet.Game.SHeroTalentTreeSelectedEvent'),
111: (79, 'NNet.Game.STriggerProfilerLoggingFinishedEvent'),
112: (175, 'NNet.Game.SHeroTalentTreeSelectionPanelToggledEvent'),
}
# The typeid of the NNet.Game.EEventId enum.
game_eventid_typeid = 0
# Map from protocol NNet.Game.*Message eventid to (typeid, name)
message_event_types = {
0: (176, 'NNet.Game.SChatMessage'),
1: (177, 'NNet.Game.SPingMessage'),
2: (178, 'NNet.Game.SLoadingProgressMessage'),
3: (79, 'NNet.Game.SServerPingMessage'),
4: (179, 'NNet.Game.SReconnectNotifyMessage'),
5: (183, 'NNet.Game.SPlayerAnnounceMessage'),
}
# The typeid of the NNet.Game.EMessageId enum.
message_eventid_typeid = 1
# Map from protocol NNet.Replay.Tracker.*Event eventid to (typeid, name)
tracker_event_types = {
1: (184, 'NNet.Replay.Tracker.SUnitBornEvent'),
2: (186, 'NNet.Replay.Tracker.SUnitDiedEvent'),
3: (187, 'NNet.Replay.Tracker.SUnitOwnerChangeEvent'),
4: (188, 'NNet.Replay.Tracker.SUnitTypeChangeEvent'),
5: (189, 'NNet.Replay.Tracker.SUpgradeEvent'),
6: (184, 'NNet.Replay.Tracker.SUnitInitEvent'),
7: (190, 'NNet.Replay.Tracker.SUnitDoneEvent'),
8: (192, 'NNet.Replay.Tracker.SUnitPositionsEvent'),
9: (193, 'NNet.Replay.Tracker.SPlayerSetupEvent'),
10: (201, 'NNet.Replay.Tracker.SStatGameEvent'),
11: (207, 'NNet.Replay.Tracker.SScoreResultEvent'),
12: (185, 'NNet.Replay.Tracker.SUnitRevivedEvent'),
13: (208, 'NNet.Replay.Tracker.SHeroBannedEvent'),
14: (209, 'NNet.Replay.Tracker.SHeroPickedEvent'),
15: (210, 'NNet.Replay.Tracker.SHeroSwappedEvent'),
}
# The typeid of the NNet.Replay.Tracker.EEventId enum.
tracker_eventid_typeid = 2
# The typeid of NNet.SVarUint32 (the type used to encode gameloop deltas).
svaruint32_typeid = 7
# The typeid of NNet.Replay.SGameUserId (the type used to encode player ids).
replay_userid_typeid = 8
# The typeid of NNet.Replay.SHeader (the type used to store replay game version and length).
replay_header_typeid = 18
# The typeid of NNet.Game.SDetails (the type used to store overall replay details).
game_details_typeid = 40
# The typeid of NNet.Replay.SInitData (the type used to store the initial lobby).
replay_initdata_typeid = 70
def _varuint32_value(value):
# Returns the numeric value from a SVarUint32 instance.
for v in six.itervalues(value):
return v
return 0
def _decode_event_stream(decoder, eventid_typeid, event_types, decode_user_id):
# Decodes events prefixed with a gameloop and possibly userid
gameloop = 0
while not decoder.done():
start_bits = decoder.used_bits()
# decode the gameloop delta before each event
delta = _varuint32_value(decoder.instance(svaruint32_typeid))
gameloop += delta
# decode the userid before each event
if decode_user_id:
userid = decoder.instance(replay_userid_typeid)
# decode the event id
eventid = decoder.instance(eventid_typeid)
typeid, typename = event_types.get(eventid, (None, None))
if typeid is None:
raise CorruptedError('eventid(%d) at %s' % (eventid, decoder))
# decode the event struct instance
event = decoder.instance(typeid)
event['_event'] = typename
event['_eventid'] = eventid
# insert gameloop and userid
event['_gameloop'] = gameloop
if decode_user_id:
event['_userid'] = userid
# the next event is byte aligned
decoder.byte_align()
# insert bits used in stream
event['_bits'] = decoder.used_bits() - start_bits
yield event
def decode_replay_game_events(contents):
"""Decodes and yields each game event from the contents byte string."""
decoder = BitPackedDecoder(contents, typeinfos)
for event in _decode_event_stream(decoder,
game_eventid_typeid,
game_event_types,
decode_user_id=True):
yield event
def decode_replay_message_events(contents):
"""Decodes and yields each message event from the contents byte string."""
decoder = BitPackedDecoder(contents, typeinfos)
for event in _decode_event_stream(decoder,
message_eventid_typeid,
message_event_types,
decode_user_id=True):
yield event
def decode_replay_tracker_events(contents):
"""Decodes and yields each tracker event from the contents byte string."""
decoder = VersionedDecoder(contents, typeinfos)
for event in _decode_event_stream(decoder,
tracker_eventid_typeid,
tracker_event_types,
decode_user_id=False):
yield event
def decode_replay_header(contents):
"""Decodes and return the replay header from the contents byte string."""
decoder = VersionedDecoder(contents, typeinfos)
return decoder.instance(replay_header_typeid)
def decode_replay_details(contents):
"""Decodes and returns the game details from the contents byte string."""
decoder = VersionedDecoder(contents, typeinfos)
return decoder.instance(game_details_typeid)
def decode_replay_initdata(contents):
"""Decodes and return the replay init data from the contents byte string."""
decoder = BitPackedDecoder(contents, typeinfos)
return decoder.instance(replay_initdata_typeid)
def decode_replay_attributes_events(contents):
"""Decodes and yields each attribute from the contents byte string."""
buffer = BitPackedBuffer(contents, 'little')
attributes = {}
if not buffer.done():
attributes['source'] = buffer.read_bits(8)
attributes['mapNamespace'] = buffer.read_bits(32)
_ = buffer.read_bits(32)
attributes['scopes'] = {}
while not buffer.done():
value = {}
value['namespace'] = buffer.read_bits(32)
value['attrid'] = attrid = buffer.read_bits(32)
scope = buffer.read_bits(8)
value['value'] = buffer.read_aligned_bytes(4)[::-1].strip(b'\x00')
        if scope not in attributes['scopes']:
            attributes['scopes'][scope] = {}
        if attrid not in attributes['scopes'][scope]:
            attributes['scopes'][scope][attrid] = []
attributes['scopes'][scope][attrid].append(value)
return attributes
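# The mapping built above has the (inferred) shape:
#   {'source': ..., 'mapNamespace': ...,
#    'scopes': {scope: {attrid: [{'namespace': ..., 'attrid': ..., 'value': ...}]}}}
# where each 4-byte value is byte-reversed and stripped of trailing NULs.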
def unit_tag(unitTagIndex, unitTagRecycle):
return (unitTagIndex << 18) + unitTagRecycle
def unit_tag_index(unitTag):
return (unitTag >> 18) & 0x00003fff
def unit_tag_recycle(unitTag):
return (unitTag) & 0x0003ffff
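# Worked example of the tag packing above (illustrative values):
#   unit_tag(5, 3) == (5 << 18) + 3 == 1310723
#   unit_tag_index(1310723) == 5, unit_tag_recycle(1310723) == 3
# i.e. the recycle counter occupies the low 18 bits and the index sits
# above it (masked to 14 bits on extraction).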
| mit |
zymsys/sms-tools | lectures/07-Sinusoidal-plus-residual-model/plots-code/hprModelAnal-flute.py | 21 | 2771 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
import math
import sys, os, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import stft as STFT
import utilFunctions as UF
import harmonicModel as HM
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/flute-A4.wav'))
w = np.blackman(551)   # analysis window
N = 1024               # FFT size for analysis
t = -100               # magnitude threshold (dB) for peak detection
nH = 40                # maximum number of harmonics
minf0 = 420            # minimum f0 in Hz (flute A4 is ~440 Hz)
maxf0 = 460            # maximum f0 in Hz
f0et = 5               # error threshold for TWM f0 detection
maxnpeaksTwm = 5       # maximum number of peaks used by TWM
minSineDur = .1        # minimum duration of harmonic tracks in seconds
harmDevSlope = 0.01    # allowed deviation slope for harmonics
Ns = 512               # FFT size for synthesis / residual computation
H = Ns/4               # hop size
mX, pX = STFT.stftAnal(x, fs, w, N, H)
hfreq, hmag, hphase = HM.harmonicModelAnal(x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope, minSineDur)
xr = UF.sineSubtraction(x, Ns, H, hfreq, hmag, hphase, fs)
mXr, pXr = STFT.stftAnal(xr, fs, hamming(Ns), Ns, H)
maxplotfreq = 5000.0
plt.figure(1, figsize=(9, 7))
plt.subplot(221)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = fs*np.arange(N*maxplotfreq/fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:N*maxplotfreq/fs+1]))
plt.autoscale(tight=True)
harms = hfreq*np.less(hfreq,maxplotfreq)
harms[harms==0] = np.nan
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.autoscale(tight=True)
plt.title('mX + harmonics (flute-A4.wav)')
plt.subplot(222)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = fs*np.arange(N*maxplotfreq/fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(np.diff(pX[:,:N*maxplotfreq/fs+1],axis=1)))
plt.autoscale(tight=True)
harms = hfreq*np.less(hfreq,maxplotfreq)
harms[harms==0] = np.nan
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.autoscale(tight=True)
plt.title('pX + harmonics')
plt.subplot(223)
numFrames = int(mXr[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = fs*np.arange(Ns*maxplotfreq/fs)/Ns
plt.pcolormesh(frmTime, binFreq, np.transpose(mXr[:,:Ns*maxplotfreq/fs+1]))
plt.autoscale(tight=True)
plt.title('mXr')
plt.subplot(224)
numFrames = int(pXr[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = fs*np.arange(Ns*maxplotfreq/fs)/Ns
plt.pcolormesh(frmTime, binFreq, np.transpose(np.diff(pXr[:,:Ns*maxplotfreq/fs+1],axis=1)))
plt.autoscale(tight=True)
plt.title('pXr')
plt.tight_layout()
plt.savefig('hprModelAnal-flute.png')
UF.wavwrite(5*xr, fs, 'flute-residual.wav')
plt.show()
| agpl-3.0 |
metachris/RPIO | source/RPIO/PWM/__init__.py | 5 | 7082 | # -*- coding: utf-8 -*-
#
# This file is part of RPIO.
#
# Copyright
#
# Copyright (C) 2013 Chris Hager <chris@linuxuser.at>
#
# License
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details at
# <http://www.gnu.org/licenses/lgpl-3.0-standalone.html>
#
# Documentation
#
# http://pythonhosted.org/RPIO
#
"""
Flexible PWM via DMA for the Raspberry Pi. Supports pulse width granularities
of down to 1µs, multiple DMA channels, multiple GPIOs per channel, timing by
PWM (default) or PCM and more. RPIO.PWM is BETA; feedback highly appreciated.
You can directly access the low-level methods via PWM.init_channel(), etc. as
well as several helpers such as the PWM.Servo class. For more information take
a look at pythonhosted.org/RPIO as well as the source code at
https://github.com/metachris/RPIO/blob/master/source/c_pwm
Example of using `PWM.Servo`:
servo = RPIO.PWM.Servo()
# Set servo on GPIO17 to 1200µs (1.2ms)
servo.set_servo(17, 1200)
# Set servo on GPIO17 to 2000µs (2.0ms)
servo.set_servo(17, 2000)
# Clear servo on GPIO17
servo.stop_servo(17)
Example of using the low-level methods:
PWM.setup()
PWM.init_channel(0)
PWM.print_channel(0)
PWM.add_channel_pulse(0, 17, 0, 50)
...
PWM.clear_channel_gpio(0, 17)
...
PWM.cleanup()
"""
from RPIO.PWM import _PWM
#
# Constants from pwm.c
#
DELAY_VIA_PWM = _PWM.DELAY_VIA_PWM
DELAY_VIA_PCM = _PWM.DELAY_VIA_PCM
LOG_LEVEL_DEBUG = _PWM.LOG_LEVEL_DEBUG
LOG_LEVEL_ERRORS = _PWM.LOG_LEVEL_ERRORS
SUBCYCLE_TIME_US_DEFAULT = _PWM.SUBCYCLE_TIME_US_DEFAULT
PULSE_WIDTH_INCREMENT_GRANULARITY_US_DEFAULT = \
_PWM.PULSE_WIDTH_INCREMENT_GRANULARITY_US_DEFAULT
VERSION = _PWM.VERSION
#
# Methods from pwm.c
#
def setup(pulse_incr_us=PULSE_WIDTH_INCREMENT_GRANULARITY_US_DEFAULT, \
delay_hw=DELAY_VIA_PWM):
"""
Setup needs to be called once before working with any channels.
Optional Parameters:
    pulse_incr_us: the pulse width increment granularity (default=10us)
delay_hw: either PWM.DELAY_VIA_PWM (default) or PWM.DELAY_VIA_PCM
"""
return _PWM.setup(pulse_incr_us, delay_hw)
def cleanup():
""" Stops all PWM and DMA actvity """
return _PWM.cleanup()
def init_channel(channel, subcycle_time_us=SUBCYCLE_TIME_US_DEFAULT):
""" Setup a channel with a specific subcycle time [us] """
return _PWM.init_channel(channel, subcycle_time_us)
def clear_channel(channel):
""" Clears a channel of all pulses """
return _PWM.clear_channel(channel)
def clear_channel_gpio(channel, gpio):
""" Clears one specific GPIO from this DMA channel """
return _PWM.clear_channel_gpio(channel, gpio)
def add_channel_pulse(dma_channel, gpio, start, width):
"""
Add a pulse for a specific GPIO to a dma channel subcycle. `start` and
`width` are multiples of the pulse-width increment granularity.
"""
return _PWM.add_channel_pulse(dma_channel, gpio, start, width)
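# For example, assuming the default 10us increment from setup(), the call
#   add_channel_pulse(0, 17, 0, 50)
# would assert GPIO 17 high for 50 * 10us = 500us at the start of each
# subcycle on DMA channel 0.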
def print_channel(channel):
""" Print info about a specific channel to stdout """
return _PWM.print_channel(channel)
def set_loglevel(level):
"""
Sets the loglevel for the PWM module to either PWM.LOG_LEVEL_DEBUG for all
messages, or to PWM.LOG_LEVEL_ERRORS for only fatal error messages.
"""
return _PWM.set_loglevel(level)
def is_setup():
""" Returns 1 if setup(..) has been called, else 0 """
return _PWM.is_setup()
def is_channel_initialized(channel):
""" Returns 1 if this channel has been initialized, else 0 """
return _PWM.is_channel_initialized(channel)
def get_pulse_incr_us():
""" Returns the currently set pulse width increment granularity in us """
return _PWM.get_pulse_incr_us()
def get_channel_subcycle_time_us(channel):
""" Returns this channels subcycle time in us """
return _PWM.get_channel_subcycle_time_us(channel)
class Servo:
"""
This class is a helper for using servos on any number of GPIOs.
The subcycle time is set to the servo default of 20ms, but you can
adjust this to your needs via the `Servo.__init__(..)` method.
Example:
servo = RPIO.PWM.Servo()
# Set servo on GPIO17 to 1200µs (1.2ms)
servo.set_servo(17, 1200)
# Set servo on GPIO17 to 2000µs (2.0ms)
servo.set_servo(17, 2000)
# Clear servo on GPIO17
servo.stop_servo(17)
"""
_subcycle_time_us = None
_dma_channel = None
def __init__(self, dma_channel=0, subcycle_time_us=20000, \
pulse_incr_us=10):
"""
Makes sure PWM is setup with the correct increment granularity and
subcycle time.
"""
self._dma_channel = dma_channel
self._subcycle_time_us = subcycle_time_us
if _PWM.is_setup():
_pw_inc = _PWM.get_pulse_incr_us()
            if pulse_incr_us != _pw_inc:
                raise AttributeError(("Error: PWM is already setup with pulse-"
                        "width increment granularity of %sus instead of %sus")\
                        % (_pw_inc, pulse_incr_us))
else:
setup(pulse_incr_us=pulse_incr_us)
def set_servo(self, gpio, pulse_width_us):
"""
Sets a pulse-width on a gpio to repeat every subcycle
(by default every 20ms).
"""
# Make sure we can set the exact pulse_width_us
_pulse_incr_us = _PWM.get_pulse_incr_us()
if pulse_width_us % _pulse_incr_us:
# No clean division possible
raise AttributeError(("Pulse width increment granularity %sus "
"cannot divide a pulse-time of %sus") % (_pulse_incr_us,
pulse_width_us))
# Initialize channel if not already done, else check subcycle time
if _PWM.is_channel_initialized(self._dma_channel):
_subcycle_us = _PWM.get_channel_subcycle_time_us(self._dma_channel)
if _subcycle_us != self._subcycle_time_us:
raise AttributeError(("Error: DMA channel %s is setup with a "
"subcycle_time of %sus (instead of %sus)") % \
(self._dma_channel, _subcycle_us,
self._subcycle_time_us))
else:
init_channel(self._dma_channel, self._subcycle_time_us)
# Add pulse for this GPIO
add_channel_pulse(self._dma_channel, gpio, 0, \
int(pulse_width_us / _pulse_incr_us))
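    # Illustrative use: with the default 10us granularity, set_servo(17, 1200)
    # schedules a 120-increment pulse every subcycle, while set_servo(17, 1205)
    # raises AttributeError because 1205us is not a clean multiple of 10us.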
def stop_servo(self, gpio):
""" Stops servo activity for this gpio """
clear_channel_gpio(self._dma_channel, gpio)
| lgpl-3.0 |
sajeeshcs/nested_quota_final | nova/tests/unit/virt/vmwareapi/test_read_write_util.py | 6 | 1897 | # Copyright 2013 IBM Corp.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
import requests
from nova import test
from nova.virt.vmwareapi import read_write_util
CONF = cfg.CONF
class ReadWriteUtilTestCase(test.NoDBTestCase):
@mock.patch.object(requests.api, 'request')
def test_ipv6_host_read(self, mock_request):
ipv6_host = 'fd8c:215d:178e:c51e:200:c9ff:fed1:584c'
port = 7443
folder = 'tmp/fake.txt'
read_write_util.VMwareHTTPReadFile(ipv6_host,
port,
'fake_dc',
'fake_ds',
dict(),
folder)
base_url = 'https://[%s]:%s/folder/%s' % (ipv6_host, port, folder)
base_url += '?dsName=fake_ds&dcPath=fake_dc'
headers = {'User-Agent': 'OpenStack-ESX-Adapter'}
mock_request.assert_called_once_with('get',
base_url,
headers=headers,
allow_redirects=True,
stream=True,
verify=False)
| apache-2.0 |
marinho/geraldo | site/newsite/django_1_0/django/utils/tzinfo.py | 9 | 2424 | "Implementation of tzinfo classes for use with datetime.datetime."
import locale
import time
from datetime import timedelta, tzinfo
from django.utils.encoding import smart_unicode
try:
DEFAULT_ENCODING = locale.getdefaultlocale()[1] or 'ascii'
except:
# Any problems at all determining the locale and we fallback. See #5846.
DEFAULT_ENCODING = 'ascii'
class FixedOffset(tzinfo):
"Fixed offset in minutes east from UTC."
def __init__(self, offset):
self.__offset = timedelta(minutes=offset)
self.__name = u"%+03d%02d" % (offset // 60, offset % 60)
def __repr__(self):
return self.__name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return timedelta(0)
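# Illustrative use: FixedOffset(330) models UTC+05:30; utcoffset() returns
# timedelta(minutes=330) and tzname()/repr() yield "+0530".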
class LocalTimezone(tzinfo):
"Proxy timezone information from time module."
def __init__(self, dt):
tzinfo.__init__(self, dt)
self._tzname = self.tzname(dt)
def __repr__(self):
return self._tzname
def utcoffset(self, dt):
if self._isdst(dt):
return timedelta(seconds=-time.altzone)
else:
return timedelta(seconds=-time.timezone)
def dst(self, dt):
if self._isdst(dt):
return timedelta(seconds=-time.altzone) - timedelta(seconds=-time.timezone)
else:
return timedelta(0)
def tzname(self, dt):
try:
return smart_unicode(time.tzname[self._isdst(dt)], DEFAULT_ENCODING)
except UnicodeDecodeError:
return None
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.weekday(), 0, -1)
try:
stamp = time.mktime(tt)
except (OverflowError, ValueError):
# 32 bit systems can't handle dates after Jan 2038, and certain
# systems can't handle dates before ~1901-12-01:
#
# >>> time.mktime((1900, 1, 13, 0, 0, 0, 0, 0, 0))
# OverflowError: mktime argument out of range
# >>> time.mktime((1850, 1, 13, 0, 0, 0, 0, 0, 0))
# ValueError: year out of range
#
# In this case, we fake the date, because we only care about the
# DST flag.
tt = (2037,) + tt[1:]
stamp = time.mktime(tt)
tt = time.localtime(stamp)
return tt.tm_isdst > 0
| lgpl-3.0 |
Anlim/decode-Django | Django-1.5.1/django/utils/unittest/main.py | 219 | 9392 | """Unittest main program"""
import sys
import os
import types
from django.utils.unittest import loader, runner
try:
from django.utils.unittest.signals import installHandler
except ImportError:
installHandler = None
__unittest = True
FAILFAST = " -f, --failfast Stop on first failure\n"
CATCHBREAK = " -c, --catch Catch control-C and display results\n"
BUFFEROUTPUT = " -b, --buffer Buffer stdout and stderr during test runs\n"
USAGE_AS_MAIN = """\
Usage: %(progName)s [options] [tests]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
%(failfast)s%(catchbreak)s%(buffer)s
Examples:
%(progName)s test_module - run tests from test_module
%(progName)s test_module.TestClass - run tests from
test_module.TestClass
%(progName)s test_module.TestClass.test_method - run specified test method
[tests] can be a list of any number of test modules, classes and test
methods.
Alternative Usage: %(progName)s discover [options]
Options:
-v, --verbose Verbose output
%(failfast)s%(catchbreak)s%(buffer)s -s directory Directory to start discovery ('.' default)
-p pattern Pattern to match test files ('test*.py' default)
-t directory Top level directory of project (default to
start directory)
For test discovery all test modules must be importable from the top
level directory of the project.
"""
USAGE_FROM_MODULE = """\
Usage: %(progName)s [options] [test] [...]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
%(failfast)s%(catchbreak)s%(buffer)s
Examples:
%(progName)s - run default set of tests
%(progName)s MyTestSuite - run suite 'MyTestSuite'
%(progName)s MyTestCase.testSomething - run MyTestCase.testSomething
%(progName)s MyTestCase - run all 'test*' test methods
in MyTestCase
"""
class TestProgram(object):
"""A command-line program that runs a set of tests; this is primarily
for making test modules conveniently executable.
"""
USAGE = USAGE_FROM_MODULE
# defaults for testing
failfast = catchbreak = buffer = progName = None
def __init__(self, module='__main__', defaultTest=None,
argv=None, testRunner=None,
testLoader=loader.defaultTestLoader, exit=True,
verbosity=1, failfast=None, catchbreak=None, buffer=None):
if isinstance(module, basestring):
self.module = __import__(module)
for part in module.split('.')[1:]:
self.module = getattr(self.module, part)
else:
self.module = module
if argv is None:
argv = sys.argv
self.exit = exit
self.verbosity = verbosity
self.failfast = failfast
self.catchbreak = catchbreak
self.buffer = buffer
self.defaultTest = defaultTest
self.testRunner = testRunner
self.testLoader = testLoader
self.progName = os.path.basename(argv[0])
self.parseArgs(argv)
self.runTests()
def usageExit(self, msg=None):
if msg:
print(msg)
usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '',
'buffer': ''}
if self.failfast != False:
usage['failfast'] = FAILFAST
if self.catchbreak != False and installHandler is not None:
usage['catchbreak'] = CATCHBREAK
if self.buffer != False:
usage['buffer'] = BUFFEROUTPUT
print(self.USAGE % usage)
sys.exit(2)
def parseArgs(self, argv):
if len(argv) > 1 and argv[1].lower() == 'discover':
self._do_discovery(argv[2:])
return
import getopt
long_opts = ['help', 'verbose', 'quiet', 'failfast', 'catch', 'buffer']
try:
options, args = getopt.getopt(argv[1:], 'hHvqfcb', long_opts)
for opt, value in options:
if opt in ('-h','-H','--help'):
self.usageExit()
if opt in ('-q','--quiet'):
self.verbosity = 0
if opt in ('-v','--verbose'):
self.verbosity = 2
if opt in ('-f','--failfast'):
if self.failfast is None:
self.failfast = True
# Should this raise an exception if -f is not valid?
if opt in ('-c','--catch'):
if self.catchbreak is None and installHandler is not None:
self.catchbreak = True
# Should this raise an exception if -c is not valid?
if opt in ('-b','--buffer'):
if self.buffer is None:
self.buffer = True
# Should this raise an exception if -b is not valid?
if len(args) == 0 and self.defaultTest is None:
# createTests will load tests from self.module
self.testNames = None
elif len(args) > 0:
self.testNames = args
if __name__ == '__main__':
# to support python -m unittest ...
self.module = None
else:
self.testNames = (self.defaultTest,)
self.createTests()
except getopt.error as msg:
self.usageExit(msg)
def createTests(self):
if self.testNames is None:
self.test = self.testLoader.loadTestsFromModule(self.module)
else:
self.test = self.testLoader.loadTestsFromNames(self.testNames,
self.module)
def _do_discovery(self, argv, Loader=loader.TestLoader):
# handle command line args for test discovery
self.progName = '%s discover' % self.progName
import optparse
parser = optparse.OptionParser()
parser.prog = self.progName
parser.add_option('-v', '--verbose', dest='verbose', default=False,
help='Verbose output', action='store_true')
if self.failfast != False:
parser.add_option('-f', '--failfast', dest='failfast', default=False,
help='Stop on first fail or error',
action='store_true')
if self.catchbreak != False and installHandler is not None:
parser.add_option('-c', '--catch', dest='catchbreak', default=False,
help='Catch ctrl-C and display results so far',
action='store_true')
if self.buffer != False:
parser.add_option('-b', '--buffer', dest='buffer', default=False,
help='Buffer stdout and stderr during tests',
action='store_true')
parser.add_option('-s', '--start-directory', dest='start', default='.',
help="Directory to start discovery ('.' default)")
parser.add_option('-p', '--pattern', dest='pattern', default='test*.py',
help="Pattern to match tests ('test*.py' default)")
parser.add_option('-t', '--top-level-directory', dest='top', default=None,
help='Top level directory of project (defaults to start directory)')
options, args = parser.parse_args(argv)
if len(args) > 3:
self.usageExit()
for name, value in zip(('start', 'pattern', 'top'), args):
setattr(options, name, value)
# only set options from the parsing here
# if they weren't set explicitly in the constructor
if self.failfast is None:
self.failfast = options.failfast
if self.catchbreak is None and installHandler is not None:
self.catchbreak = options.catchbreak
if self.buffer is None:
self.buffer = options.buffer
if options.verbose:
self.verbosity = 2
start_dir = options.start
pattern = options.pattern
top_level_dir = options.top
loader = Loader()
self.test = loader.discover(start_dir, pattern, top_level_dir)
def runTests(self):
if self.catchbreak:
installHandler()
if self.testRunner is None:
self.testRunner = runner.TextTestRunner
if isinstance(self.testRunner, (type, types.ClassType)):
try:
testRunner = self.testRunner(verbosity=self.verbosity,
failfast=self.failfast,
buffer=self.buffer)
except TypeError:
# didn't accept the verbosity, buffer or failfast arguments
testRunner = self.testRunner()
else:
# it is assumed to be a TestRunner instance
testRunner = self.testRunner
self.result = testRunner.run(self.test)
if self.exit:
sys.exit(not self.result.wasSuccessful())
main = TestProgram
def main_():
TestProgram.USAGE = USAGE_AS_MAIN
main(module=None)
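# Hedged usage sketch: TestProgram mirrors the stdlib unittest.main() entry
# point, so a test module can make itself executable (file and test names
# below are illustrative):
#
#     # tests.py
#     from django.utils.unittest import main
#
#     if __name__ == '__main__':
#         main()  # parses sys.argv, runs the tests, then exits
#         # discovery mode, equivalent to "tests.py discover -s tests":
#         # main(module=None, argv=['tests.py', 'discover', '-s', 'tests'])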
| gpl-2.0 |
eul-721/The-Perfect-Pokemon-Team-Balancer | libs/env/Lib/site-packages/sqlalchemy/sql/dml.py | 78 | 29493 | # sql/dml.py
# Copyright (C) 2009-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Provide :class:`.Insert`, :class:`.Update` and :class:`.Delete`.
"""
from .base import Executable, _generative, _from_objects, DialectKWArgs
from .elements import ClauseElement, _literal_as_text, Null, and_, _clone
from .selectable import _interpret_as_from, _interpret_as_select, HasPrefixes
from .. import util
from .. import exc
class UpdateBase(DialectKWArgs, HasPrefixes, Executable, ClauseElement):
"""Form the base for ``INSERT``, ``UPDATE``, and ``DELETE`` statements.
"""
__visit_name__ = 'update_base'
_execution_options = \
Executable._execution_options.union({'autocommit': True})
_hints = util.immutabledict()
_prefixes = ()
def _process_colparams(self, parameters):
def process_single(p):
if isinstance(p, (list, tuple)):
return dict(
(c.key, pval)
for c, pval in zip(self.table.c, p)
)
else:
return p
if isinstance(parameters, (list, tuple)) and \
parameters and \
isinstance(parameters[0], (list, tuple, dict)):
if not self._supports_multi_parameters:
raise exc.InvalidRequestError(
"This construct does not support "
"multiple parameter sets.")
return [process_single(p) for p in parameters], True
else:
return process_single(parameters), False
def params(self, *arg, **kw):
"""Set the parameters for the statement.
This method raises ``NotImplementedError`` on the base class,
and is overridden by :class:`.ValuesBase` to provide the
SET/VALUES clause of UPDATE and INSERT.
"""
raise NotImplementedError(
"params() is not supported for INSERT/UPDATE/DELETE statements."
" To set the values for an INSERT or UPDATE statement, use"
" stmt.values(**parameters).")
def bind(self):
"""Return a 'bind' linked to this :class:`.UpdateBase`
or a :class:`.Table` associated with it.
"""
return self._bind or self.table.bind
def _set_bind(self, bind):
self._bind = bind
bind = property(bind, _set_bind)
@_generative
def returning(self, *cols):
"""Add a :term:`RETURNING` or equivalent clause to this statement.
e.g.::
stmt = table.update().\\
where(table.c.data == 'value').\\
values(status='X').\\
returning(table.c.server_flag, table.c.updated_timestamp)
for server_flag, updated_timestamp in connection.execute(stmt):
print(server_flag, updated_timestamp)
The given collection of column expressions should be derived from
the table that is
the target of the INSERT, UPDATE, or DELETE. While :class:`.Column`
objects are typical, the elements can also be expressions::
stmt = table.insert().returning(
(table.c.first_name + " " + table.c.last_name).label('fullname')
)
Upon compilation, a RETURNING clause, or database equivalent,
will be rendered within the statement. For INSERT and UPDATE,
the values are the newly inserted/updated values. For DELETE,
the values are those of the rows which were deleted.
Upon execution, the values of the columns to be returned
are made available via the result set and can be iterated
using :meth:`.ResultProxy.fetchone` and similar. For DBAPIs which do not
natively support returning values (i.e. cx_oracle),
SQLAlchemy will approximate this behavior at the result level
so that a reasonable amount of behavioral neutrality is
provided.
Note that not all databases/DBAPIs
support RETURNING. For those backends with no support,
an exception is raised upon compilation and/or execution.
For those who do support it, the functionality across backends
varies greatly, including restrictions on executemany()
and other statements which return multiple rows. Please
read the documentation notes for the database in use in
order to determine the availability of RETURNING.
.. seealso::
:meth:`.ValuesBase.return_defaults` - an alternative method tailored
towards efficient fetching of server-side defaults and triggers
for single-row INSERTs or UPDATEs.
"""
self._returning = cols
@_generative
def with_hint(self, text, selectable=None, dialect_name="*"):
"""Add a table hint for a single table to this
INSERT/UPDATE/DELETE statement.
.. note::
:meth:`.UpdateBase.with_hint` currently applies only to
Microsoft SQL Server. For MySQL INSERT/UPDATE/DELETE hints, use
:meth:`.UpdateBase.prefix_with`.
The text of the hint is rendered in the appropriate
location for the database backend in use, relative
to the :class:`.Table` that is the subject of this
statement, or optionally to that of the given
:class:`.Table` passed as the ``selectable`` argument.
The ``dialect_name`` option will limit the rendering of a particular
hint to a particular backend. Such as, to add a hint
that only takes effect for SQL Server::
mytable.insert().with_hint("WITH (PAGLOCK)", dialect_name="mssql")
.. versionadded:: 0.7.6
:param text: Text of the hint.
:param selectable: optional :class:`.Table` that specifies
an element of the FROM clause within an UPDATE or DELETE
to be the subject of the hint - applies only to certain backends.
:param dialect_name: defaults to ``*``, if specified as the name
of a particular dialect, will apply these hints only when
that dialect is in use.
"""
if selectable is None:
selectable = self.table
self._hints = self._hints.union(
{(selectable, dialect_name): text})
class ValuesBase(UpdateBase):
"""Supplies support for :meth:`.ValuesBase.values` to
INSERT and UPDATE constructs."""
__visit_name__ = 'values_base'
_supports_multi_parameters = False
_has_multi_parameters = False
select = None
def __init__(self, table, values, prefixes):
self.table = _interpret_as_from(table)
self.parameters, self._has_multi_parameters = \
self._process_colparams(values)
if prefixes:
self._setup_prefixes(prefixes)
@_generative
def values(self, *args, **kwargs):
"""specify a fixed VALUES clause for an INSERT statement, or the SET
clause for an UPDATE.
Note that the :class:`.Insert` and :class:`.Update` constructs support
per-execution time formatting of the VALUES and/or SET clauses,
based on the arguments passed to :meth:`.Connection.execute`. However,
the :meth:`.ValuesBase.values` method can be used to "fix" a particular
set of parameters into the statement.
Multiple calls to :meth:`.ValuesBase.values` will produce a new
construct, each one with the parameter list modified to include
the new parameters sent. In the typical case of a single
dictionary of parameters, the newly passed keys will replace
the same keys in the previous construct. In the case of a list-based
"multiple values" construct, each new list of values is extended
onto the existing list of values.
:param \**kwargs: key value pairs representing the string key
of a :class:`.Column` mapped to the value to be rendered into the
VALUES or SET clause::
users.insert().values(name="some name")
users.update().where(users.c.id==5).values(name="some name")
:param \*args: Alternatively, a dictionary, tuple or list
of dictionaries or tuples can be passed as a single positional
argument in order to form the VALUES or
SET clause of the statement. The single dictionary form
works the same as the kwargs form::
users.insert().values({"name": "some name"})
If a tuple is passed, the tuple should contain the same number
of columns as the target :class:`.Table`::
users.insert().values((5, "some name"))
The :class:`.Insert` construct also supports multiply-rendered VALUES
construct, for those backends which support this SQL syntax
(SQLite, Postgresql, MySQL). This mode is indicated by passing a list
of one or more dictionaries/tuples::
users.insert().values([
{"name": "some name"},
{"name": "some other name"},
{"name": "yet another name"},
])
In the case of an :class:`.Update`
construct, only the single dictionary/tuple form is accepted,
else an exception is raised. It is also an exception case to
attempt to mix the single-/multiple- value styles together,
either through multiple :meth:`.ValuesBase.values` calls
or by sending a list + kwargs at the same time.
.. note::
Passing a multiple values list is *not* the same
as passing a multiple values list to the :meth:`.Connection.execute`
method. Passing a list of parameter sets to :meth:`.ValuesBase.values`
produces a construct of this form::
INSERT INTO table (col1, col2, col3) VALUES
(col1_0, col2_0, col3_0),
(col1_1, col2_1, col3_1),
...
whereas a multiple list passed to :meth:`.Connection.execute`
has the effect of using the DBAPI
`executemany() <http://www.python.org/dev/peps/pep-0249/#id18>`_
method, which provides a high-performance system of invoking
a single-row INSERT statement many times against a series
of parameter sets. The "executemany" style is supported by
all database backends, as it does not depend on a special SQL
syntax.
.. versionadded:: 0.8
Support for multiple-VALUES INSERT statements.
.. seealso::
:ref:`inserts_and_updates` - SQL Expression
Language Tutorial
:func:`~.expression.insert` - produce an ``INSERT`` statement
:func:`~.expression.update` - produce an ``UPDATE`` statement
"""
if self.select is not None:
raise exc.InvalidRequestError(
"This construct already inserts from a SELECT")
if self._has_multi_parameters and kwargs:
raise exc.InvalidRequestError(
"This construct already has multiple parameter sets.")
if args:
if len(args) > 1:
raise exc.ArgumentError(
"Only a single dictionary/tuple or list of "
"dictionaries/tuples is accepted positionally.")
v = args[0]
else:
v = {}
if self.parameters is None:
self.parameters, self._has_multi_parameters = \
self._process_colparams(v)
else:
if self._has_multi_parameters:
self.parameters = list(self.parameters)
p, self._has_multi_parameters = self._process_colparams(v)
if not self._has_multi_parameters:
raise exc.ArgumentError(
"Can't mix single-values and multiple values "
"formats in one statement")
self.parameters.extend(p)
else:
self.parameters = self.parameters.copy()
p, self._has_multi_parameters = self._process_colparams(v)
if self._has_multi_parameters:
raise exc.ArgumentError(
"Can't mix single-values and multiple values "
"formats in one statement")
self.parameters.update(p)
if kwargs:
if self._has_multi_parameters:
raise exc.ArgumentError(
"Can't pass kwargs and multiple parameter sets "
"simultaenously")
else:
self.parameters.update(kwargs)
@_generative
def return_defaults(self, *cols):
"""Make use of a :term:`RETURNING` clause for the purpose
of fetching server-side expressions and defaults.
E.g.::
stmt = table.insert().values(data='newdata').return_defaults()
result = connection.execute(stmt)
server_created_at = result.returned_defaults['created_at']
When used against a backend that supports RETURNING, all column
values generated by SQL expression or server-side-default will be added
to any existing RETURNING clause, provided that
:meth:`.UpdateBase.returning` is not used simultaneously. The column values
will then be available on the result using the
:attr:`.ResultProxy.returned_defaults` accessor as a
dictionary, referring to values keyed to the :class:`.Column` object
as well as its ``.key``.
This method differs from :meth:`.UpdateBase.returning` in these ways:
1. :meth:`.ValuesBase.return_defaults` is only intended for use with
an INSERT or an UPDATE statement that matches exactly one row.
While the RETURNING construct in the general sense supports multiple
rows for a multi-row UPDATE or DELETE statement, or for special
cases of INSERT that return multiple rows (e.g. INSERT from SELECT,
multi-valued VALUES clause), :meth:`.ValuesBase.return_defaults`
is intended only
for an "ORM-style" single-row INSERT/UPDATE statement. The row
           returned by the statement is also consumed implicitly when
:meth:`.ValuesBase.return_defaults` is used. By contrast,
:meth:`.UpdateBase.returning` leaves the RETURNING result-set intact
with a collection of any number of rows.
2. It is compatible with the existing logic to fetch auto-generated
primary key values, also known as "implicit returning". Backends that
support RETURNING will automatically make use of RETURNING in order
to fetch the value of newly generated primary keys; while the
:meth:`.UpdateBase.returning` method circumvents this behavior,
:meth:`.ValuesBase.return_defaults` leaves it intact.
3. It can be called against any backend. Backends that don't support
RETURNING will skip the usage of the feature, rather than raising
an exception. The return value of :attr:`.ResultProxy.returned_defaults`
           will be ``None``.
:meth:`.ValuesBase.return_defaults` is used by the ORM to provide
an efficient implementation for the ``eager_defaults`` feature of
:func:`.mapper`.
:param cols: optional list of column key names or :class:`.Column`
         objects. If omitted, all column expressions evaluated on the server
are added to the returning list.
.. versionadded:: 0.9.0
.. seealso::
:meth:`.UpdateBase.returning`
:attr:`.ResultProxy.returned_defaults`
"""
self._return_defaults = cols or True
class Insert(ValuesBase):
"""Represent an INSERT construct.
The :class:`.Insert` object is created using the
:func:`~.expression.insert()` function.
.. seealso::
:ref:`coretutorial_insert_expressions`
"""
__visit_name__ = 'insert'
_supports_multi_parameters = True
def __init__(self,
table,
values=None,
inline=False,
bind=None,
prefixes=None,
returning=None,
return_defaults=False,
**dialect_kw):
"""Construct an :class:`.Insert` object.
Similar functionality is available via the
:meth:`~.TableClause.insert` method on
:class:`~.schema.Table`.
:param table: :class:`.TableClause` which is the subject of the insert.
:param values: collection of values to be inserted; see
:meth:`.Insert.values` for a description of allowed formats here.
Can be omitted entirely; a :class:`.Insert` construct will also
dynamically render the VALUES clause at execution time based on
the parameters passed to :meth:`.Connection.execute`.
:param inline: if True, SQL defaults will be compiled 'inline' into the
statement and not pre-executed.
If both `values` and compile-time bind parameters are present, the
compile-time bind parameters override the information specified
within `values` on a per-key basis.
The keys within `values` can be either :class:`~sqlalchemy.schema.Column`
objects or their string identifiers. Each key may reference one of:
* a literal data value (i.e. string, number, etc.);
* a Column object;
* a SELECT statement.
If a ``SELECT`` statement is specified which references this
``INSERT`` statement's table, the statement will be correlated
against the ``INSERT`` statement.
.. seealso::
:ref:`coretutorial_insert_expressions` - SQL Expression Tutorial
:ref:`inserts_and_updates` - SQL Expression Tutorial
"""
ValuesBase.__init__(self, table, values, prefixes)
self._bind = bind
self.select = self.select_names = None
self.inline = inline
self._returning = returning
self._validate_dialect_kwargs(dialect_kw)
self._return_defaults = return_defaults
def get_children(self, **kwargs):
if self.select is not None:
return self.select,
else:
return ()
@_generative
def from_select(self, names, select):
"""Return a new :class:`.Insert` construct which represents
an ``INSERT...FROM SELECT`` statement.
e.g.::
sel = select([table1.c.a, table1.c.b]).where(table1.c.c > 5)
ins = table2.insert().from_select(['a', 'b'], sel)
:param names: a sequence of string column names or :class:`.Column`
objects representing the target columns.
:param select: a :func:`.select` construct, :class:`.FromClause`
or other construct which resolves into a :class:`.FromClause`,
such as an ORM :class:`.Query` object, etc. The order of
columns returned from this FROM clause should correspond to the
order of columns sent as the ``names`` parameter; while this
is not checked before passing along to the database, the database
would normally raise an exception if these column lists don't
correspond.
.. note::
Depending on backend, it may be necessary for the :class:`.Insert`
statement to be constructed using the ``inline=True`` flag; this
flag will prevent the implicit usage of ``RETURNING`` when the
``INSERT`` statement is rendered, which isn't supported on a backend
such as Oracle in conjunction with an ``INSERT..SELECT`` combination::
sel = select([table1.c.a, table1.c.b]).where(table1.c.c > 5)
ins = table2.insert(inline=True).from_select(['a', 'b'], sel)
.. note::
A SELECT..INSERT construct in SQL has no VALUES clause. Therefore
:class:`.Column` objects which utilize Python-side defaults
(e.g. as described at :ref:`metadata_defaults_toplevel`)
will **not** take effect when using :meth:`.Insert.from_select`.
.. versionadded:: 0.8.3
"""
if self.parameters:
raise exc.InvalidRequestError(
"This construct already inserts value expressions")
self.parameters, self._has_multi_parameters = \
self._process_colparams(dict((n, Null()) for n in names))
self.select_names = names
self.select = _interpret_as_select(select)
def _copy_internals(self, clone=_clone, **kw):
# TODO: coverage
self.parameters = self.parameters.copy()
if self.select is not None:
self.select = _clone(self.select)
class Update(ValuesBase):
"""Represent an Update construct.
The :class:`.Update` object is created using the :func:`update()` function.
"""
__visit_name__ = 'update'
def __init__(self,
table,
whereclause=None,
values=None,
inline=False,
bind=None,
prefixes=None,
returning=None,
return_defaults=False,
**dialect_kw):
"""Construct an :class:`.Update` object.
E.g.::
from sqlalchemy import update
stmt = update(users).where(users.c.id==5).\\
values(name='user #5')
Similar functionality is available via the
:meth:`~.TableClause.update` method on
:class:`.Table`::
stmt = users.update().\\
where(users.c.id==5).\\
values(name='user #5')
:param table: A :class:`.Table` object representing the database
table to be updated.
:param whereclause: Optional SQL expression describing the ``WHERE``
condition of the ``UPDATE`` statement. Modern applications
may prefer to use the generative :meth:`~Update.where()`
method to specify the ``WHERE`` clause.
The WHERE clause can refer to multiple tables.
For databases which support this, an ``UPDATE FROM`` clause will
be generated, or on MySQL, a multi-table update. The statement
will fail on databases that don't have support for multi-table
update statements. A SQL-standard method of referring to
additional tables in the WHERE clause is to use a correlated
subquery::
users.update().values(name='ed').where(
users.c.name==select([addresses.c.email_address]).\\
where(addresses.c.user_id==users.c.id).\\
as_scalar()
)
.. versionchanged:: 0.7.4
The WHERE clause can refer to multiple tables.
:param values:
Optional dictionary which specifies the ``SET`` conditions of the
``UPDATE``. If left as ``None``, the ``SET``
conditions are determined from those parameters passed to the
statement during the execution and/or compilation of the
statement. When compiled standalone without any parameters,
the ``SET`` clause generates for all columns.
Modern applications may prefer to use the generative
:meth:`.Update.values` method to set the values of the
UPDATE statement.
:param inline:
if True, SQL defaults present on :class:`.Column` objects via
the ``default`` keyword will be compiled 'inline' into the statement
and not pre-executed. This means that their values will not
be available in the dictionary returned from
:meth:`.ResultProxy.last_updated_params`.
If both ``values`` and compile-time bind parameters are present, the
compile-time bind parameters override the information specified
within ``values`` on a per-key basis.
The keys within ``values`` can be either :class:`.Column`
objects or their string identifiers (specifically the "key" of the
:class:`.Column`, normally but not necessarily equivalent to
its "name"). Normally, the
:class:`.Column` objects used here are expected to be
part of the target :class:`.Table` that is the table
to be updated. However when using MySQL, a multiple-table
UPDATE statement can refer to columns from any of
the tables referred to in the WHERE clause.
The values referred to in ``values`` are typically:
* a literal data value (i.e. string, number, etc.)
* a SQL expression, such as a related :class:`.Column`,
a scalar-returning :func:`.select` construct,
etc.
When combining :func:`.select` constructs within the values
clause of an :func:`.update` construct,
the subquery represented by the :func:`.select` should be
*correlated* to the parent table, that is, providing criterion
which links the table inside the subquery to the outer table
being updated::
users.update().values(
name=select([addresses.c.email_address]).\\
where(addresses.c.user_id==users.c.id).\\
as_scalar()
)
.. seealso::
:ref:`inserts_and_updates` - SQL Expression
Language Tutorial
"""
ValuesBase.__init__(self, table, values, prefixes)
self._bind = bind
self._returning = returning
if whereclause is not None:
self._whereclause = _literal_as_text(whereclause)
else:
self._whereclause = None
self.inline = inline
self._validate_dialect_kwargs(dialect_kw)
self._return_defaults = return_defaults
def get_children(self, **kwargs):
if self._whereclause is not None:
return self._whereclause,
else:
return ()
def _copy_internals(self, clone=_clone, **kw):
# TODO: coverage
self._whereclause = clone(self._whereclause, **kw)
self.parameters = self.parameters.copy()
@_generative
def where(self, whereclause):
"""return a new update() construct with the given expression added to
its WHERE clause, joined to the existing clause via AND, if any.
"""
if self._whereclause is not None:
self._whereclause = and_(self._whereclause,
_literal_as_text(whereclause))
else:
self._whereclause = _literal_as_text(whereclause)
@property
def _extra_froms(self):
# TODO: this could be made memoized
# if the memoization is reset on each generative call.
froms = []
seen = set([self.table])
if self._whereclause is not None:
for item in _from_objects(self._whereclause):
if not seen.intersection(item._cloned_set):
froms.append(item)
seen.update(item._cloned_set)
return froms
class Delete(UpdateBase):
"""Represent a DELETE construct.
The :class:`.Delete` object is created using the :func:`delete()` function.
"""
__visit_name__ = 'delete'
def __init__(self,
table,
whereclause=None,
bind=None,
returning=None,
prefixes=None,
**dialect_kw):
"""Construct :class:`.Delete` object.
Similar functionality is available via the
:meth:`~.TableClause.delete` method on
:class:`~.schema.Table`.
:param table: The table to be updated.
:param whereclause: A :class:`.ClauseElement` describing the ``WHERE``
condition of the ``UPDATE`` statement. Note that the
:meth:`~Delete.where()` generative method may be used instead.
.. seealso::
:ref:`deletes` - SQL Expression Tutorial
"""
self._bind = bind
self.table = _interpret_as_from(table)
self._returning = returning
if prefixes:
self._setup_prefixes(prefixes)
if whereclause is not None:
self._whereclause = _literal_as_text(whereclause)
else:
self._whereclause = None
self._validate_dialect_kwargs(dialect_kw)
def get_children(self, **kwargs):
if self._whereclause is not None:
return self._whereclause,
else:
return ()
@_generative
def where(self, whereclause):
"""Add the given WHERE clause to a newly returned delete construct."""
if self._whereclause is not None:
self._whereclause = and_(self._whereclause,
_literal_as_text(whereclause))
else:
self._whereclause = _literal_as_text(whereclause)
def _copy_internals(self, clone=_clone, **kw):
# TODO: coverage
self._whereclause = clone(self._whereclause, **kw)
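# Hedged usage sketch for the constructs above (assumes a Table bound to an
# engine; every name here is illustrative, not part of this module):
#
#     from sqlalchemy import (create_engine, MetaData, Table, Column,
#                             Integer, String)
#
#     engine = create_engine('sqlite://')
#     meta = MetaData()
#     users = Table('users', meta,
#                   Column('id', Integer, primary_key=True),
#                   Column('name', String(50)))
#     meta.create_all(engine)
#
#     conn = engine.connect()
#     conn.execute(users.insert().values(name='ed'))                         # Insert
#     conn.execute(users.update().where(users.c.id == 1).values(name='al'))  # Update
#     conn.execute(users.delete().where(users.c.id == 1))                    # Delete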
| gpl-2.0 |
piffey/ansible | lib/ansible/module_utils/aws/direct_connect.py | 97 | 4009 | # Copyright (c) 2017 Ansible Project
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
This module adds shared support for Direct Connect modules.
"""
import traceback
try:
import botocore
except ImportError:
pass
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
class DirectConnectError(Exception):
def __init__(self, msg, last_traceback=None, exception=None):
self.msg = msg
self.last_traceback = last_traceback
self.exception = exception
def delete_connection(client, connection_id):
try:
client.delete_connection(connectionId=connection_id)
except botocore.exceptions.ClientError as e:
raise DirectConnectError(msg="Failed to delete DirectConnection {0}.".format(connection_id),
last_traceback=traceback.format_exc(),
exception=e)
def associate_connection_and_lag(client, connection_id, lag_id):
try:
client.associate_connection_with_lag(connectionId=connection_id,
lagId=lag_id)
except botocore.exceptions.ClientError as e:
raise DirectConnectError(msg="Failed to associate Direct Connect connection {0}"
" with link aggregation group {1}.".format(connection_id, lag_id),
last_traceback=traceback.format_exc(),
exception=e)
def disassociate_connection_and_lag(client, connection_id, lag_id):
try:
client.disassociate_connection_from_lag(connectionId=connection_id,
lagId=lag_id)
except botocore.exceptions.ClientError as e:
raise DirectConnectError(msg="Failed to disassociate Direct Connect connection {0}"
" from link aggregation group {1}.".format(connection_id, lag_id),
last_traceback=traceback.format_exc(),
exception=e)
def delete_virtual_interface(client, virtual_interface):
try:
client.delete_virtual_interface(virtualInterfaceId=virtual_interface)
except botocore.exceptions.ClientError as e:
raise DirectConnectError(msg="Could not delete virtual interface {0}".format(virtual_interface),
last_traceback=traceback.format_exc(),
exception=e)
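# Hedged usage sketch (assumes boto3 is installed and AWS credentials are
# configured; the region and connection id are illustrative):
#
#     import boto3
#
#     client = boto3.client('directconnect', region_name='us-east-1')
#     try:
#         delete_connection(client, 'dxcon-abc12345')
#     except DirectConnectError as e:
#         print(e.msg)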
| gpl-3.0 |
Dhivyap/ansible | lib/ansible/modules/network/ingate/ig_config.py | 5 | 16005 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Ingate Systems AB
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: ig_config
short_description: Manage the configuration database on an Ingate SBC.
description:
- Manage the configuration database on an Ingate SBC.
version_added: 2.8
extends_documentation_fragment: ingate
options:
add:
description:
- Add a row to a table.
type: bool
delete:
description:
- Delete all rows in a table or a specific row.
type: bool
get:
description:
- Return all rows in a table or a specific row.
type: bool
modify:
description:
- Modify a row in a table.
type: bool
revert:
description:
- Reset the preliminary configuration.
type: bool
factory:
description:
- Reset the preliminary configuration to its factory defaults.
type: bool
store:
description:
- Store the preliminary configuration.
type: bool
no_response:
description:
- Expect no response when storing the preliminary configuration.
Refer to the C(store) option.
type: bool
return_rowid:
description:
- Get rowid(s) from a table where the columns match.
type: bool
download:
description:
- Download the configuration database from the unit.
type: bool
store_download:
description:
- If the downloaded configuration should be stored on disk.
Refer to the C(download) option.
type: bool
default: false
path:
description:
- Where in the filesystem to store the downloaded configuration.
Refer to the C(download) option.
filename:
description:
- The name of the file to store the downloaded configuration in.
Refer to the C(download) option.
table:
description:
- The name of the table.
rowid:
description:
- A row id.
type: int
columns:
description:
- A dict containing column names/values.
notes:
  - If C(store_download) is set to True, and C(path) and C(filename) are omitted,
the file will be stored in the current directory with an automatic filename.
author:
- Ingate Systems AB (@ingatesystems)
'''
EXAMPLES = '''
- name: Add/remove DNS servers
hosts: 192.168.1.1
connection: local
vars:
client_rw:
version: v1
address: "{{ inventory_hostname }}"
scheme: http
username: alice
password: foobar
tasks:
- name: Load factory defaults
ig_config:
client: "{{ client_rw }}"
factory: true
register: result
- debug:
var: result
- name: Revert to last known applied configuration
ig_config:
client: "{{ client_rw }}"
revert: true
register: result
- debug:
var: result
- name: Change the unit name
ig_config:
client: "{{ client_rw }}"
modify: true
table: misc.unitname
columns:
unitname: "Test Ansible"
register: result
- debug:
var: result
- name: Add a DNS server
ig_config:
client: "{{ client_rw }}"
add: true
table: misc.dns_servers
columns:
server: 192.168.1.21
register: result
- debug:
var: result
- name: Add a DNS server
ig_config:
client: "{{ client_rw }}"
add: true
table: misc.dns_servers
columns:
server: 192.168.1.22
register: result
- debug:
var: result
- name: Add a DNS server
ig_config:
client: "{{ client_rw }}"
add: true
table: misc.dns_servers
columns:
server: 192.168.1.23
register: last_dns
- debug:
var: last_dns
- name: Modify the last added DNS server
ig_config:
client: "{{ client_rw }}"
modify: true
table: misc.dns_servers
rowid: "{{ last_dns['add'][0]['id'] }}"
columns:
server: 192.168.1.24
register: result
- debug:
var: result
- name: Return the last added DNS server
ig_config:
client: "{{ client_rw }}"
get: true
table: misc.dns_servers
rowid: "{{ last_dns['add'][0]['id'] }}"
register: result
- debug:
var: result
- name: Remove last added DNS server
ig_config:
client: "{{ client_rw }}"
delete: true
table: misc.dns_servers
rowid: "{{ last_dns['add'][0]['id'] }}"
register: result
- debug:
var: result
- name: Return the all rows from table misc.dns_servers
ig_config:
client: "{{ client_rw }}"
get: true
table: misc.dns_servers
register: result
- debug:
var: result
- name: Remove remaining DNS servers
ig_config:
client: "{{ client_rw }}"
delete: true
table: misc.dns_servers
register: result
- debug:
var: result
- name: Get rowid for interface eth0
ig_config:
client: "{{ client_rw }}"
return_rowid: true
table: network.local_nets
columns:
interface: eth0
register: result
- debug:
var: result
- name: Store the preliminary configuration
ig_config:
client: "{{ client_rw }}"
store: true
register: result
- debug:
var: result
- name: Do backup of the configuration database
ig_config:
client: "{{ client_rw }}"
download: true
store_download: true
register: result
- debug:
var: result
'''
RETURN = '''
add:
description: A list containing information about the added row
returned: when C(add) is yes and success
type: complex
contains:
href:
description: The REST API URL to the added row
returned: success
type: string
sample: http://192.168.1.1/api/v1/misc/dns_servers/2
data:
description: Column names/values
returned: success
type: complex
sample: {'number': '2', 'server': '10.48.254.33'}
id:
description: The row id
returned: success
type: int
sample: 22
delete:
description: A list containing information about the deleted row(s)
returned: when C(delete) is yes and success
type: complex
contains:
table:
description: The name of the table
returned: success
type: string
sample: misc.dns_servers
data:
description: Column names/values
returned: success
type: complex
sample: {'number': '2', 'server': '10.48.254.33'}
id:
description: The row id
returned: success
type: int
sample: 22
get:
description: A list containing information about the row(s)
returned: when C(get) is yes and success
type: complex
contains:
table:
description: The name of the table
returned: success
type: string
sample: Testname
href:
description: The REST API URL to the row
returned: success
type: string
sample: http://192.168.1.1/api/v1/misc/dns_servers/1
data:
description: Column names/values
returned: success
type: complex
sample: {'number': '2', 'server': '10.48.254.33'}
id:
description: The row id
returned: success
type: int
sample: 1
modify:
description: A list containing information about the modified row
returned: when C(modify) is yes and success
type: complex
contains:
table:
description: The name of the table
returned: success
type: string
sample: Testname
href:
description: The REST API URL to the modified row
returned: success
type: string
sample: http://192.168.1.1/api/v1/misc/dns_servers/1
data:
description: Column names/values
returned: success
type: complex
      sample: {'number': '2', 'server': '10.48.254.33'}
id:
description: The row id
returned: success
type: int
sample: 10
revert:
description: A command status message
returned: when C(revert) is yes and success
type: complex
contains:
msg:
description: The command status message
returned: success
type: string
sample: reverted the configuration to the last applied configuration.
factory:
description: A command status message
returned: when C(factory) is yes and success
type: complex
contains:
msg:
description: The command status message
returned: success
type: string
sample: reverted the configuration to the factory configuration.
store:
description: A command status message
returned: when C(store) is yes and success
type: complex
contains:
msg:
description: The command status message
returned: success
type: string
sample: Successfully applied and saved the configuration.
return_rowid:
description: The matched row id(s).
returned: when C(return_rowid) is yes and success
type: list
sample: [1, 3]
download:
description: Configuration database and meta data
returned: when C(download) is yes and success
type: complex
contains:
config:
description: The configuration database
returned: success
type: string
filename:
description: A suggested name for the configuration
returned: success
type: string
sample: testname_2018-10-01T214040.cfg
mimetype:
description: The mimetype
returned: success
type: string
sample: application/x-config-database
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.ingate.common import (ingate_argument_spec,
ingate_create_client)
try:
from ingate import ingatesdk
HAS_INGATESDK = True
except ImportError:
HAS_INGATESDK = False
def make_request(module):
# Create client and authenticate.
api_client = ingate_create_client(**module.params)
if module.params.get('add'):
# Add a row to a table.
table = module.params['table']
columns = module.params['columns']
response = api_client.add_row(table, **columns)
return True, 'add', response
elif module.params.get('delete'):
# Delete a row/table.
changed = False
table = module.params['table']
rowid = module.params.get('rowid')
if rowid:
response = api_client.delete_row(table, rowid=rowid)
else:
response = api_client.delete_table(table)
if response:
changed = True
return changed, 'delete', response
elif module.params.get('get'):
        # Get the contents of a table/row.
        changed = False
table = module.params['table']
rowid = module.params.get('rowid')
if rowid:
response = api_client.dump_row(table, rowid=rowid)
else:
response = api_client.dump_table(table)
if response:
changed = True
return changed, 'get', response
elif module.params.get('modify'):
        # Modify a table row.
        changed = False
table = module.params['table']
columns = module.params['columns']
rowid = module.params.get('rowid')
if rowid:
response = api_client.modify_row(table, rowid=rowid, **columns)
else:
response = api_client.modify_single_row(table, **columns)
if response:
changed = True
return changed, 'modify', response
elif module.params.get('revert'):
# Revert edits.
response = api_client.revert_edits()
if response:
response = response[0]['revert-edits']
return True, 'revert', response
elif module.params.get('factory'):
# Load factory defaults.
response = api_client.load_factory()
if response:
response = response[0]['load-factory']
return True, 'factory', response
elif module.params.get('store'):
# Store edit.
no_response = module.params.get('no_response')
response = api_client.store_edit(no_response=no_response)
if response:
response = response[0]['store-edit']
return True, 'store', response
elif module.params.get('return_rowid'):
# Find matching rowid(s) in a table.
table = module.params['table']
columns = module.params['columns']
response = api_client.dump_table(table)
rowids = []
for row in response:
match = False
for (name, value) in columns.items():
if name not in row['data']:
continue
if not row['data'][name] == value:
match = False
break
else:
match = True
if match:
rowids.append(row['id'])
return False, 'return_rowid', rowids
elif module.params.get('download'):
# Download the configuration database.
store = module.params.get('store_download')
path = module.params.get('path')
filename = module.params.get('filename')
response = api_client.download_config(store=store, path=path,
filename=filename)
if response:
response = response[0]['download-config']
return False, 'download', response
return False, '', {}
def main():
argument_spec = ingate_argument_spec(
add=dict(type='bool'),
delete=dict(type='bool'),
get=dict(type='bool'),
modify=dict(type='bool'),
revert=dict(type='bool'),
factory=dict(type='bool'),
store=dict(type='bool'),
no_response=dict(type='bool', default=False),
return_rowid=dict(type='bool'),
download=dict(type='bool'),
store_download=dict(type='bool', default=False),
path=dict(),
filename=dict(),
table=dict(),
rowid=dict(type='int'),
columns=dict(type='dict'),
)
mutually_exclusive = [('add', 'delete', 'get', 'modify', 'revert',
'factory', 'store', 'return_rowid', 'download')]
required_one_of = [['add', 'delete', 'get', 'modify', 'revert', 'factory',
'store', 'return_rowid', 'download']]
required_if = [('add', True, ['table', 'columns']),
('delete', True, ['table']),
('get', True, ['table']),
('modify', True, ['table', 'columns']),
('return_rowid', True, ['table', 'columns'])]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
required_if=required_if,
required_one_of=required_one_of,
supports_check_mode=False)
if not HAS_INGATESDK:
module.fail_json(msg='The Ingate Python SDK module is required')
result = dict(changed=False)
try:
changed, command, response = make_request(module)
if response and command:
result[command] = response
result['changed'] = changed
except ingatesdk.SdkError as e:
module.fail_json(msg=str(e))
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
crckyl/pixplus | tools/conf-parser.py | 1 | 1750 | import sys
import json
from xml.sax.saxutils import escape
from xml.sax.saxutils import quoteattr
format = ' <preference name="%(name)s" value=%(value)s />'
if sys.argv[1] == 'safari':
format = ''' <dict>
<key>Title</key>
<string>%(name)s</string>
<key>Key</key>
<string>%(name)s</string>
<key>DefaultValue</key>
%(value_safari)s
<key>Type</key>
<string>%(type_safari)s</string>%(more)s
</dict>'''
pass
def print_conf(conf):
for sec in conf:
for item in sec['items']:
name = 'conf_%s_%s' % (sec['name'], item['key'])
value = item['value']
type_safari = 'TextField'
value_safari = '<string>%s</string>' % escape(str(value))
more = ''
if 'hint' in item:
type_safari = 'PopUpButton'
more = '''
<key>Titles</key>
<array>'''
for hint in item['hint']:
more += '\n <string>%s</string>' % hint['title']
pass
more += '\n </array>'
more += '''
<key>Values</key>
<array>'''
for hint in item['hint']:
more += '\n <string>%s</string>' % str(hint['value'])
pass
more += '\n </array>'
elif isinstance(value, bool):
type_safari = 'CheckBox'
if value:
value = 'true'
else:
value = 'false'
pass
value_safari = '<%s/>' % value
pass
params = {
'name': name,
'value': quoteattr(str(value)),
'type_safari': type_safari,
'value_safari': value_safari,
'more': more
}
print(format % params)
pass
pass
pass
print_conf(json.loads(sys.stdin.read()))
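# Hedged invocation sketch: the script takes the target platform as argv[1]
# and reads a JSON configuration description on stdin. The input shape below
# is inferred from print_conf and is illustrative:
#
#     echo '[{"name": "popup", "items": [{"key": "scale", "value": true}]}]' \
#         | python conf-parser.py safari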
| mit |
laayis/yowsup | yowsup/structs/protocoltreenode.py | 38 | 4491 | import binascii
import sys
class ProtocolTreeNode(object):
def __init__(self, tag, attributes = None, children = None, data = None):
self.tag = tag
self.attributes = attributes or {}
self.children = children or []
self.data = data
assert type(self.children) is list, "Children must be a list, got %s" % type(self.children)
def __eq__(self, protocolTreeNode):
"""
:param protocolTreeNode: ProtocolTreeNode
:return: bool
"""
#
if protocolTreeNode.__class__ == ProtocolTreeNode\
and self.tag == protocolTreeNode.tag\
and self.data == protocolTreeNode.data\
and self.attributes == protocolTreeNode.attributes\
and len(self.getAllChildren()) == len(protocolTreeNode.getAllChildren()):
found = False
for c in self.getAllChildren():
for c2 in protocolTreeNode.getAllChildren():
if c == c2:
found = True
break
if not found:
return False
found = False
for c in protocolTreeNode.getAllChildren():
for c2 in self.getAllChildren():
if c == c2:
found = True
break
if not found:
return False
return True
return False
def __hash__(self):
return hash(self.tag) ^ hash(tuple(self.attributes.items())) ^ hash(self.data)
def toString(self):
out = "<"+self.tag
if self.attributes is not None:
for key,val in self.attributes.items():
out+= " "+key+'="'+val+'"'
out+= ">\n"
if self.data is not None:
if type(self.data) is bytearray:
try:
out += "%s" % self.data.decode()
except UnicodeDecodeError:
out += binascii.hexlify(self.data)
else:
out += "%s" % self.data
if type(self.data) is str and sys.version_info >= (3,0):
out += "\nHEX3:%s\n" % binascii.hexlify(self.data.encode('latin-1'))
else:
out += "\nHEX:%s\n" % binascii.hexlify(self.data)
for c in self.children:
try:
out += c.toString()
except UnicodeDecodeError:
out += "[ENCODED DATA]\n"
out+= "</"+self.tag+">\n"
return out
def __str__(self):
return self.toString()
def getData(self):
return self.data
def setData(self, data):
self.data = data
@staticmethod
def tagEquals(node,string):
return node is not None and node.tag is not None and node.tag == string
@staticmethod
def require(node,string):
if not ProtocolTreeNode.tagEquals(node,string):
raise Exception("failed require. string: "+string);
def __getitem__(self, key):
return self.getAttributeValue(key)
def __setitem__(self, key, val):
self.setAttribute(key, val)
def __delitem__(self, key):
self.removeAttribute(key)
def getChild(self,identifier):
if type(identifier) == int:
if len(self.children) > identifier:
return self.children[identifier]
else:
return None
for c in self.children:
if identifier == c.tag:
return c
return None
def hasChildren(self):
return len(self.children) > 0
def addChild(self, childNode):
self.children.append(childNode)
def addChildren(self, children):
for c in children:
self.addChild(c)
def getAttributeValue(self,string):
try:
return self.attributes[string]
except KeyError:
return None
def removeAttribute(self, key):
if key in self.attributes:
del self.attributes[key]
def setAttribute(self, key, value):
self.attributes[key] = value
def getAllChildren(self,tag = None):
ret = []
if tag is None:
return self.children
for c in self.children:
if tag == c.tag:
ret.append(c)
return ret
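# Hedged usage sketch (tag, attribute and data values are illustrative):
#
#     node = ProtocolTreeNode('message', {'id': '1', 'type': 'text'},
#                             [ProtocolTreeNode('body', data='hello')])
#     body = node.getChild('body')        # child lookup by tag (or by index)
#     node['to'] = 'demo@s.whatsapp.net'  # shorthand for setAttribute()
#     print(node.toString())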
| gpl-3.0 |
mbeacom/locust | locust/test/test_wait_time.py | 1 | 2327 | import random
import time
from locust import User, TaskSet, between, constant, constant_pacing
from locust.exception import MissingWaitTimeError
from .testcases import LocustTestCase
class TestWaitTime(LocustTestCase):
def test_between(self):
class MyUser(User):
wait_time = between(3, 9)
class TaskSet1(TaskSet):
pass
class TaskSet2(TaskSet):
wait_time = between(20.0, 21.0)
u = MyUser(self.environment)
ts1 = TaskSet1(u)
ts2 = TaskSet2(u)
for i in range(100):
w = u.wait_time()
self.assertGreaterEqual(w, 3)
self.assertLessEqual(w, 9)
w = ts1.wait_time()
self.assertGreaterEqual(w, 3)
self.assertLessEqual(w, 9)
for i in range(100):
w = ts2.wait_time()
self.assertGreaterEqual(w, 20)
self.assertLessEqual(w, 21)
def test_constant(self):
class MyUser(User):
wait_time = constant(13)
class TaskSet1(TaskSet):
pass
self.assertEqual(13, MyUser(self.environment).wait_time())
self.assertEqual(13, TaskSet1(MyUser(self.environment)).wait_time())
def test_default_wait_time(self):
class MyUser(User):
pass # default is wait_time = constant(0)
class TaskSet1(TaskSet):
pass
self.assertEqual(0, MyUser(self.environment).wait_time())
self.assertEqual(0, TaskSet1(MyUser(self.environment)).wait_time())
taskset = TaskSet1(MyUser(self.environment))
start_time = time.monotonic()
taskset.wait()
self.assertLess(time.monotonic() - start_time, 0.002)
def test_constant_pacing(self):
class MyUser(User):
wait_time = constant_pacing(0.1)
class TS(TaskSet):
pass
ts = TS(MyUser(self.environment))
ts2 = TS(MyUser(self.environment))
previous_time = time.monotonic()
for i in range(7):
ts.wait()
since_last_run = time.monotonic() - previous_time
self.assertLess(abs(0.1 - since_last_run), 0.02)
previous_time = time.monotonic()
time.sleep(random.random() * 0.1)
_ = ts2.wait_time()
_ = ts2.wait_time()
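# Hedged sketch of the wait_time API these tests exercise (assumes a recent
# locust with HttpUser; the task and URL are illustrative):
#
#     from locust import HttpUser, task, between
#
#     class Visitor(HttpUser):
#         wait_time = between(3, 9)  # pause 3-9 seconds between tasks
#
#         @task
#         def index(self):
#             self.client.get('/')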
| mit |
vasylbo/aiohttp | examples/mpsrv.py | 18 | 9115 | #!/usr/bin/env python3
"""Simple multiprocess http server written using an event loop."""
import argparse
import os
import socket
import signal
import time
import asyncio
import aiohttp
import aiohttp.server
from aiohttp import websocket
ARGS = argparse.ArgumentParser(description="Run simple http server.")
ARGS.add_argument(
'--host', action="store", dest='host',
default='127.0.0.1', help='Host name')
ARGS.add_argument(
'--port', action="store", dest='port',
default=8080, type=int, help='Port number')
ARGS.add_argument(
'--workers', action="store", dest='workers',
default=2, type=int, help='Number of workers.')
class HttpRequestHandler(aiohttp.server.ServerHttpProtocol):
@asyncio.coroutine
def handle_request(self, message, payload):
print('{}: method = {!r}; path = {!r}; version = {!r}'.format(
os.getpid(), message.method, message.path, message.version))
path = message.path
if (not (path.isprintable() and path.startswith('/')) or '/.' in path):
path = None
else:
path = '.' + path
            if not os.path.exists(path):
                path = None
            else:
                isdir = os.path.isdir(path)
if not path:
raise aiohttp.HttpProcessingError(code=404)
if isdir and not path.endswith('/'):
path = path + '/'
raise aiohttp.HttpProcessingError(
code=302, headers=(('URI', path), ('Location', path)))
response = aiohttp.Response(
self.writer, 200, http_version=message.version)
response.add_header('Transfer-Encoding', 'chunked')
# content encoding
accept_encoding = message.headers.get('accept-encoding', '').lower()
if 'deflate' in accept_encoding:
response.add_header('Content-Encoding', 'deflate')
response.add_compression_filter('deflate')
elif 'gzip' in accept_encoding:
response.add_header('Content-Encoding', 'gzip')
response.add_compression_filter('gzip')
response.add_chunking_filter(1025)
if isdir:
response.add_header('Content-type', 'text/html')
response.send_headers()
response.write(b'<ul>\r\n')
for name in sorted(os.listdir(path)):
if name.isprintable() and not name.startswith('.'):
try:
bname = name.encode('ascii')
except UnicodeError:
pass
else:
if os.path.isdir(os.path.join(path, name)):
response.write(b'<li><a href="' + bname +
b'/">' + bname + b'/</a></li>\r\n')
else:
response.write(b'<li><a href="' + bname +
b'">' + bname + b'</a></li>\r\n')
response.write(b'</ul>')
else:
response.add_header('Content-type', 'text/plain')
response.send_headers()
try:
with open(path, 'rb') as fp:
chunk = fp.read(8192)
while chunk:
response.write(chunk)
chunk = fp.read(8192)
except OSError:
response.write(b'Cannot open')
yield from response.write_eof()
if response.keep_alive():
self.keep_alive(True)
class ChildProcess:
def __init__(self, up_read, down_write, args, sock):
self.up_read = up_read
self.down_write = down_write
self.args = args
self.sock = sock
def start(self):
# start server
self.loop = loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
def stop():
self.loop.stop()
os._exit(0)
loop.add_signal_handler(signal.SIGINT, stop)
f = loop.create_server(
lambda: HttpRequestHandler(debug=True, keep_alive=75),
sock=self.sock)
srv = loop.run_until_complete(f)
x = srv.sockets[0]
print('Starting srv worker process {} on {}'.format(
os.getpid(), x.getsockname()))
# heartbeat
asyncio.async(self.heartbeat())
asyncio.get_event_loop().run_forever()
os._exit(0)
@asyncio.coroutine
def heartbeat(self):
# setup pipes
read_transport, read_proto = yield from self.loop.connect_read_pipe(
aiohttp.StreamProtocol, os.fdopen(self.up_read, 'rb'))
write_transport, _ = yield from self.loop.connect_write_pipe(
aiohttp.StreamProtocol, os.fdopen(self.down_write, 'wb'))
reader = read_proto.reader.set_parser(websocket.WebSocketParser)
writer = websocket.WebSocketWriter(write_transport)
while True:
try:
msg = yield from reader.read()
except:
print('Supervisor is dead, {} stopping...'.format(os.getpid()))
self.loop.stop()
break
if msg.tp == websocket.MSG_PING:
writer.pong()
elif msg.tp == websocket.MSG_CLOSE:
break
read_transport.close()
write_transport.close()
class Worker:
_started = False
def __init__(self, loop, args, sock):
self.loop = loop
self.args = args
self.sock = sock
self.start()
def start(self):
assert not self._started
self._started = True
up_read, up_write = os.pipe()
down_read, down_write = os.pipe()
args, sock = self.args, self.sock
pid = os.fork()
if pid:
# parent
os.close(up_read)
os.close(down_write)
asyncio.async(self.connect(pid, up_write, down_read))
else:
# child
os.close(up_write)
os.close(down_read)
# cleanup after fork
asyncio.set_event_loop(None)
# setup process
process = ChildProcess(up_read, down_write, args, sock)
process.start()
@asyncio.coroutine
def heartbeat(self, writer):
while True:
yield from asyncio.sleep(15)
if (time.monotonic() - self.ping) < 30:
writer.ping()
else:
print('Restart unresponsive worker process: {}'.format(
self.pid))
self.kill()
self.start()
return
@asyncio.coroutine
def chat(self, reader):
while True:
try:
msg = yield from reader.read()
except:
print('Restart unresponsive worker process: {}'.format(
self.pid))
self.kill()
self.start()
return
if msg.tp == websocket.MSG_PONG:
self.ping = time.monotonic()
@asyncio.coroutine
def connect(self, pid, up_write, down_read):
# setup pipes
read_transport, proto = yield from self.loop.connect_read_pipe(
aiohttp.StreamProtocol, os.fdopen(down_read, 'rb'))
write_transport, _ = yield from self.loop.connect_write_pipe(
aiohttp.StreamProtocol, os.fdopen(up_write, 'wb'))
# websocket protocol
reader = proto.reader.set_parser(websocket.WebSocketParser)
writer = websocket.WebSocketWriter(write_transport)
# store info
self.pid = pid
self.ping = time.monotonic()
self.rtransport = read_transport
self.wtransport = write_transport
self.chat_task = asyncio.Task(self.chat(reader))
self.heartbeat_task = asyncio.Task(self.heartbeat(writer))
def kill(self):
self._started = False
self.chat_task.cancel()
self.heartbeat_task.cancel()
self.rtransport.close()
self.wtransport.close()
os.kill(self.pid, signal.SIGTERM)
class Supervisor:
def __init__(self, args):
self.loop = asyncio.get_event_loop()
self.args = args
self.workers = []
def start(self):
# bind socket
sock = self.sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((self.args.host, self.args.port))
sock.listen(1024)
sock.setblocking(False)
# start processes
for idx in range(self.args.workers):
self.workers.append(Worker(self.loop, self.args, sock))
self.loop.add_signal_handler(signal.SIGINT, lambda: self.loop.stop())
self.loop.run_forever()
def main():
if getattr(os, "fork", None) is None:
print("os.fork isn't supported by your OS")
return
args = ARGS.parse_args()
if ':' in args.host:
args.host, port = args.host.split(':', 1)
args.port = int(port)
supervisor = Supervisor(args)
supervisor.start()
if __name__ == '__main__':
main()
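# Invocation sketch (hypothetical flag names, inferred from the attributes
# main() reads off the parsed ARGS, i.e. args.host, args.port, args.workers;
# the actual parser is defined earlier in the file):
#
#     python mp_srv.py --host 127.0.0.1:8080 --workers 4
#
# main() splits a combined "host:port" value itself, and it bails out early
# on platforms without os.fork, so this will not run on Windows.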
| apache-2.0 |
annajordanous/network-analysis | detect_english2.py | 2 | 2442 | # Approximate error rate:
# Of 1000 comments identified as English, only one was not judged to
# be in English by the programmer (a short comment in Spanish that
# included two English words and some Spanish words that are spelled
# like English words). It is harder to quantify what happened with the
# comments not identified as English. For the most part, these were a
# mixture of non-English comments and very short English comments,
# some with non-standard spellings (e.g. 'spooooooky' or 'swagggg!'),
# a small number with non-standard punctuation that confused the
# word-separating algorithm (e.g. 'i-love-this-track'), and a much
# larger number with no lemmas (e.g. the very common
# 'thanks!'). Longer ones that were missed tended to be those with few
# lemmas (e.g. 'thanks 4 all great replies guys, really really
# inspires me!' - 'thanks', 'guys', 'replies', 'really', and
# 'inspires' all have affixes, for example, while '4' and 'me' will
# have appeared in the French dictionary, which left only 'all').
# Overall, not bad. The most regrettable problem is a tendency to filter
# out some of the tastiest comments, e.g. 'soooo unhealthy fakkkkk!!!
# dope shit meng'. This would have been picked up if only 'unhealthy'
# were a lemma, but it highlights a bigger problem, i.e. that for
# analytic purposes we really need a human to identify that 'soooo'
# and 'fakkkkk' are instances of the same lexeme as 'so' and
# 'fuck'. The loss of the very many repetitions of 'thanks' is
# relatively unimportant.
# Note that a dictionary of internet terms, e.g. 'omg', 'lol', and
# variants, was only added for English.
import wordsets
import string
# It might be good to do something about respellings. The easiest approach
# would be to work with repeated letters. The following letters can be
# doubled in English; none can be tripled: e,d,f,l,n,o,p,r,s,t. A rough
# sketch of this idea follows.
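# A minimal sketch of that idea (a hypothetical helper, not used elsewhere
# in this module): collapse any run of three or more identical letters to
# two if the letter can legally be doubled, otherwise to one, so that
# 'spooooooky' becomes 'spooky' and 'fakkkkk' becomes 'fak'.
import re

_DOUBLABLE = set('edflnoprst')

def collapse_repeats(word):
    def shrink(match):
        letter = match.group(1)
        return letter * 2 if letter in _DOUBLABLE else letter
    return re.sub(r'([a-z])\1{2,}', shrink, word)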
def count_langs(wordlist,langsetsdict):
d={lang:0 for lang in langsetsdict}
for word in wordlist:
for lang in langsetsdict:
if word in langsetsdict[lang]:
d[lang] += 1
return d
def englishp(t,min_eng):
wordlist = [w for w in [w.strip(string.punctuation) for w in t.split()]
if w]
counted = count_langs(wordlist,wordsets.lang_sets)
if counted['en'] == 0: return False
if counted['en'] / float(len(wordlist)) < min_eng: return False
if counted['en'] < max(v for k,v in counted.items()): return False
return True
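# Usage sketch (illustrative call; min_eng is the minimum fraction of words
# that must be recognised as English):
#
#     englishp('thanks for all the great replies', min_eng=0.5)
#
# This returns True only if English accounts for at least min_eng of the
# words and no other language in wordsets.lang_sets scores more hits.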
| gpl-2.0 |
ProfessionalIT/professionalit-webiste | sdk/google_appengine/lib/cherrypy/cherrypy/__init__.py | 82 | 21667 | """CherryPy is a pythonic, object-oriented HTTP framework.
CherryPy consists of not one, but four separate API layers.
The APPLICATION LAYER is the simplest. CherryPy applications are written as
a tree of classes and methods, where each branch in the tree corresponds to
a branch in the URL path. Each method is a 'page handler', which receives
GET and POST params as keyword arguments, and returns or yields the (HTML)
body of the response. The special method name 'index' is used for paths
that end in a slash, and the special method name 'default' is used to
handle multiple paths via a single handler. This layer also includes:
* the 'exposed' attribute (and cherrypy.expose)
* cherrypy.quickstart()
* _cp_config attributes
* cherrypy.tools (including cherrypy.session)
* cherrypy.url()
The ENVIRONMENT LAYER is used by developers at all levels. It provides
information about the current request and response, plus the application
and server environment, via a (default) set of top-level objects:
* cherrypy.request
* cherrypy.response
* cherrypy.engine
* cherrypy.server
* cherrypy.tree
* cherrypy.config
* cherrypy.thread_data
* cherrypy.log
* cherrypy.HTTPError, NotFound, and HTTPRedirect
* cherrypy.lib
The EXTENSION LAYER allows advanced users to construct and share their own
plugins. It consists of:
* Hook API
* Tool API
* Toolbox API
* Dispatch API
* Config Namespace API
Finally, there is the CORE LAYER, which uses the core API's to construct
the default components which are available at higher layers. You can think
of the default components as the 'reference implementation' for CherryPy.
Megaframeworks (and advanced users) may replace the default components
with customized or extended components. The core API's are:
* Application API
* Engine API
* Request API
* Server API
* WSGI API
These API's are described in the CherryPy specification:
http://www.cherrypy.org/wiki/CherryPySpec
"""
__version__ = "3.2.2"
from cherrypy._cpcompat import urljoin as _urljoin, urlencode as _urlencode
from cherrypy._cpcompat import basestring, unicodestr, set
from cherrypy._cperror import HTTPError, HTTPRedirect, InternalRedirect
from cherrypy._cperror import NotFound, CherryPyException, TimeoutError
from cherrypy import _cpdispatch as dispatch
from cherrypy import _cptools
tools = _cptools.default_toolbox
Tool = _cptools.Tool
from cherrypy import _cprequest
from cherrypy.lib import httputil as _httputil
from cherrypy import _cptree
tree = _cptree.Tree()
from cherrypy._cptree import Application
from cherrypy import _cpwsgi as wsgi
from cherrypy import process
try:
from cherrypy.process import win32
engine = win32.Win32Bus()
engine.console_control_handler = win32.ConsoleCtrlHandler(engine)
del win32
except ImportError:
engine = process.bus
# Timeout monitor. We add two channels to the engine
# to which cherrypy.Application will publish.
engine.listeners['before_request'] = set()
engine.listeners['after_request'] = set()
class _TimeoutMonitor(process.plugins.Monitor):
def __init__(self, bus):
self.servings = []
process.plugins.Monitor.__init__(self, bus, self.run)
def before_request(self):
self.servings.append((serving.request, serving.response))
def after_request(self):
try:
self.servings.remove((serving.request, serving.response))
except ValueError:
pass
def run(self):
"""Check timeout on all responses. (Internal)"""
for req, resp in self.servings:
resp.check_timeout()
engine.timeout_monitor = _TimeoutMonitor(engine)
engine.timeout_monitor.subscribe()
engine.autoreload = process.plugins.Autoreloader(engine)
engine.autoreload.subscribe()
engine.thread_manager = process.plugins.ThreadManager(engine)
engine.thread_manager.subscribe()
engine.signal_handler = process.plugins.SignalHandler(engine)
from cherrypy import _cpserver
server = _cpserver.Server()
server.subscribe()
def quickstart(root=None, script_name="", config=None):
"""Mount the given root, start the builtin server (and engine), then block.
root: an instance of a "controller class" (a collection of page handler
methods) which represents the root of the application.
script_name: a string containing the "mount point" of the application.
This should start with a slash, and be the path portion of the URL
at which to mount the given root. For example, if root.index() will
handle requests to "http://www.example.com:8080/dept/app1/", then
the script_name argument would be "/dept/app1".
It MUST NOT end in a slash. If the script_name refers to the root
of the URI, it MUST be an empty string (not "/").
config: a file or dict containing application config. If this contains
a [global] section, those entries will be used in the global
(site-wide) config.
"""
if config:
_global_conf_alias.update(config)
tree.mount(root, script_name, config)
if hasattr(engine, "signal_handler"):
engine.signal_handler.subscribe()
if hasattr(engine, "console_control_handler"):
engine.console_control_handler.subscribe()
engine.start()
engine.block()
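# Minimal usage sketch (hypothetical app; the config values are illustrative):
#
#     import cherrypy
#
#     class Root(object):
#         @cherrypy.expose
#         def index(self):
#             return "Hello world!"
#
#     cherrypy.quickstart(Root(), "", {"global": {"server.socket_port": 8080}})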
from cherrypy._cpcompat import threadlocal as _local
class _Serving(_local):
"""An interface for registering request and response objects.
Rather than have a separate "thread local" object for the request and
the response, this class works as a single threadlocal container for
both objects (and any others which developers wish to define). In this
way, we can easily dump those objects when we stop/start a new HTTP
conversation, yet still refer to them as module-level globals in a
thread-safe way.
"""
request = _cprequest.Request(_httputil.Host("127.0.0.1", 80),
_httputil.Host("127.0.0.1", 1111))
"""
The request object for the current thread. In the main thread,
and any threads which are not receiving HTTP requests, this is None."""
response = _cprequest.Response()
"""
The response object for the current thread. In the main thread,
and any threads which are not receiving HTTP requests, this is None."""
def load(self, request, response):
self.request = request
self.response = response
def clear(self):
"""Remove all attributes of self."""
self.__dict__.clear()
serving = _Serving()
class _ThreadLocalProxy(object):
__slots__ = ['__attrname__', '__dict__']
def __init__(self, attrname):
self.__attrname__ = attrname
def __getattr__(self, name):
child = getattr(serving, self.__attrname__)
return getattr(child, name)
def __setattr__(self, name, value):
if name in ("__attrname__", ):
object.__setattr__(self, name, value)
else:
child = getattr(serving, self.__attrname__)
setattr(child, name, value)
def __delattr__(self, name):
child = getattr(serving, self.__attrname__)
delattr(child, name)
def _get_dict(self):
child = getattr(serving, self.__attrname__)
d = child.__class__.__dict__.copy()
d.update(child.__dict__)
return d
__dict__ = property(_get_dict)
def __getitem__(self, key):
child = getattr(serving, self.__attrname__)
return child[key]
def __setitem__(self, key, value):
child = getattr(serving, self.__attrname__)
child[key] = value
def __delitem__(self, key):
child = getattr(serving, self.__attrname__)
del child[key]
def __contains__(self, key):
child = getattr(serving, self.__attrname__)
return key in child
def __len__(self):
child = getattr(serving, self.__attrname__)
return len(child)
def __nonzero__(self):
child = getattr(serving, self.__attrname__)
return bool(child)
# Python 3
__bool__ = __nonzero__
# Create request and response object (the same objects will be used
# throughout the entire life of the webserver, but will redirect
# to the "serving" object)
request = _ThreadLocalProxy('request')
response = _ThreadLocalProxy('response')
# Create thread_data object as a thread-specific all-purpose storage
class _ThreadData(_local):
"""A container for thread-specific data."""
thread_data = _ThreadData()
# Monkeypatch pydoc to allow help() to go through the threadlocal proxy.
# Jan 2007: no Googleable examples of anyone else replacing pydoc.resolve.
# The only other way would be to change what is returned from type(request)
# and that's not possible in pure Python (you'd have to fake ob_type).
def _cherrypy_pydoc_resolve(thing, forceload=0):
"""Given an object or a path to an object, get the object and its name."""
if isinstance(thing, _ThreadLocalProxy):
thing = getattr(serving, thing.__attrname__)
return _pydoc._builtin_resolve(thing, forceload)
try:
import pydoc as _pydoc
_pydoc._builtin_resolve = _pydoc.resolve
_pydoc.resolve = _cherrypy_pydoc_resolve
except ImportError:
pass
from cherrypy import _cplogging
class _GlobalLogManager(_cplogging.LogManager):
"""A site-wide LogManager; routes to app.log or global log as appropriate.
This :class:`LogManager<cherrypy._cplogging.LogManager>` implements
cherrypy.log() and cherrypy.log.access(). If either
function is called during a request, the message will be sent to the
logger for the current Application. If they are called outside of a
request, the message will be sent to the site-wide logger.
"""
def __call__(self, *args, **kwargs):
"""Log the given message to the app.log or global log as appropriate."""
# Do NOT use try/except here. See http://www.cherrypy.org/ticket/945
if hasattr(request, 'app') and hasattr(request.app, 'log'):
log = request.app.log
else:
log = self
return log.error(*args, **kwargs)
def access(self):
"""Log an access message to the app.log or global log as appropriate."""
try:
return request.app.log.access()
except AttributeError:
return _cplogging.LogManager.access(self)
log = _GlobalLogManager()
# Set a default screen handler on the global log.
log.screen = True
log.error_file = ''
# Using an access file makes CP about 10% slower. Leave off by default.
log.access_file = ''
def _buslog(msg, level):
log.error(msg, 'ENGINE', severity=level)
engine.subscribe('log', _buslog)
# Helper functions for CP apps #
def expose(func=None, alias=None):
"""Expose the function, optionally providing an alias or set of aliases."""
def expose_(func):
func.exposed = True
if alias is not None:
if isinstance(alias, basestring):
parents[alias.replace(".", "_")] = func
else:
for a in alias:
parents[a.replace(".", "_")] = func
return func
import sys, types
if isinstance(func, (types.FunctionType, types.MethodType)):
if alias is None:
# @expose
func.exposed = True
return func
else:
# func = expose(func, alias)
parents = sys._getframe(1).f_locals
return expose_(func)
elif func is None:
if alias is None:
# @expose()
parents = sys._getframe(1).f_locals
return expose_
else:
# @expose(alias="alias") or
# @expose(alias=["alias1", "alias2"])
parents = sys._getframe(1).f_locals
return expose_
else:
# @expose("alias") or
# @expose(["alias1", "alias2"])
parents = sys._getframe(1).f_locals
alias = func
return expose_
def popargs(*args, **kwargs):
"""A decorator for _cp_dispatch
(cherrypy.dispatch.Dispatcher.dispatch_method_name).
Optional keyword argument: handler=(Object or Function)
Provides a _cp_dispatch function that pops off path segments into
cherrypy.request.params under the names specified. The dispatch
is then forwarded on to the next vpath element.
    Note that any existing (and exposed) member function of the class that
    popargs is applied to takes precedence over an argument of the same name. For
instance, if you have a method named "list" on the class decorated with
popargs, then accessing "/list" will call that function instead of popping
it off as the requested parameter. This restriction applies to all
_cp_dispatch functions. The only way around this restriction is to create
a "blank class" whose only function is to provide _cp_dispatch.
If there are path elements after the arguments, or more arguments
are requested than are available in the vpath, then the 'handler'
keyword argument specifies the next object to handle the parameterized
request. If handler is not specified or is None, then self is used.
If handler is a function rather than an instance, then that function
will be called with the args specified and the return value from that
function used as the next object INSTEAD of adding the parameters to
cherrypy.request.args.
This decorator may be used in one of two ways:
As a class decorator:
@cherrypy.popargs('year', 'month', 'day')
class Blog:
def index(self, year=None, month=None, day=None):
#Process the parameters here; any url like
#/, /2009, /2009/12, or /2009/12/31
#will fill in the appropriate parameters.
def create(self):
#This link will still be available at /create. Defined functions
#take precedence over arguments.
Or as a member of a class:
class Blog:
_cp_dispatch = cherrypy.popargs('year', 'month', 'day')
#...
The handler argument may be used to mix arguments with built in functions.
For instance, the following setup allows different activities at the
day, month, and year level:
class DayHandler:
def index(self, year, month, day):
#Do something with this day; probably list entries
def delete(self, year, month, day):
#Delete all entries for this day
@cherrypy.popargs('day', handler=DayHandler())
class MonthHandler:
def index(self, year, month):
#Do something with this month; probably list entries
def delete(self, year, month):
#Delete all entries for this month
@cherrypy.popargs('month', handler=MonthHandler())
class YearHandler:
def index(self, year):
#Do something with this year
#...
@cherrypy.popargs('year', handler=YearHandler())
class Root:
def index(self):
#...
"""
#Since keyword arg comes after *args, we have to process it ourselves
#for lower versions of python.
handler = None
handler_call = False
for k,v in kwargs.items():
if k == 'handler':
handler = v
else:
raise TypeError(
"cherrypy.popargs() got an unexpected keyword argument '{0}'" \
.format(k)
)
import inspect
if handler is not None \
and (hasattr(handler, '__call__') or inspect.isclass(handler)):
handler_call = True
def decorated(cls_or_self=None, vpath=None):
if inspect.isclass(cls_or_self):
#cherrypy.popargs is a class decorator
cls = cls_or_self
setattr(cls, dispatch.Dispatcher.dispatch_method_name, decorated)
return cls
#We're in the actual function
self = cls_or_self
parms = {}
for arg in args:
if not vpath:
break
parms[arg] = vpath.pop(0)
if handler is not None:
if handler_call:
return handler(**parms)
else:
request.params.update(parms)
return handler
request.params.update(parms)
#If we are the ultimate handler, then to prevent our _cp_dispatch
#from being called again, we will resolve remaining elements through
#getattr() directly.
if vpath:
return getattr(self, vpath.pop(0), None)
else:
return self
return decorated
def url(path="", qs="", script_name=None, base=None, relative=None):
"""Create an absolute URL for the given path.
If 'path' starts with a slash ('/'), this will return
(base + script_name + path + qs).
If it does not start with a slash, this returns
(base + script_name [+ request.path_info] + path + qs).
If script_name is None, cherrypy.request will be used
to find a script_name, if available.
If base is None, cherrypy.request.base will be used (if available).
Note that you can use cherrypy.tools.proxy to change this.
Finally, note that this function can be used to obtain an absolute URL
for the current request path (minus the querystring) by passing no args.
If you call url(qs=cherrypy.request.query_string), you should get the
original browser URL (assuming no internal redirections).
If relative is None or not provided, request.app.relative_urls will
be used (if available, else False). If False, the output will be an
absolute URL (including the scheme, host, vhost, and script_name).
If True, the output will instead be a URL that is relative to the
current request path, perhaps including '..' atoms. If relative is
the string 'server', the output will instead be a URL that is
relative to the server root; i.e., it will start with a slash.
"""
if isinstance(qs, (tuple, list, dict)):
qs = _urlencode(qs)
if qs:
qs = '?' + qs
if request.app:
if not path.startswith("/"):
# Append/remove trailing slash from path_info as needed
# (this is to support mistyped URL's without redirecting;
# if you want to redirect, use tools.trailing_slash).
pi = request.path_info
if request.is_index is True:
if not pi.endswith('/'):
pi = pi + '/'
elif request.is_index is False:
if pi.endswith('/') and pi != '/':
pi = pi[:-1]
if path == "":
path = pi
else:
path = _urljoin(pi, path)
if script_name is None:
script_name = request.script_name
if base is None:
base = request.base
newurl = base + script_name + path + qs
else:
# No request.app (we're being called outside a request).
# We'll have to guess the base from server.* attributes.
# This will produce very different results from the above
# if you're using vhosts or tools.proxy.
if base is None:
base = server.base()
path = (script_name or "") + path
newurl = base + path + qs
if './' in newurl:
# Normalize the URL by removing ./ and ../
atoms = []
for atom in newurl.split('/'):
if atom == '.':
pass
elif atom == '..':
atoms.pop()
else:
atoms.append(atom)
newurl = '/'.join(atoms)
# At this point, we should have a fully-qualified absolute URL.
if relative is None:
relative = getattr(request.app, "relative_urls", False)
# See http://www.ietf.org/rfc/rfc2396.txt
if relative == 'server':
# "A relative reference beginning with a single slash character is
# termed an absolute-path reference, as defined by <abs_path>..."
# This is also sometimes called "server-relative".
newurl = '/' + '/'.join(newurl.split('/', 3)[3:])
elif relative:
# "A relative reference that does not begin with a scheme name
# or a slash character is termed a relative-path reference."
old = url(relative=False).split('/')[:-1]
new = newurl.split('/')
while old and new:
a, b = old[0], new[0]
if a != b:
break
old.pop(0)
new.pop(0)
new = (['..'] * len(old)) + new
newurl = '/'.join(new)
return newurl
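# Illustrative results (request values assumed, not taken from a live
# request): with base "http://example.com", script_name "" and a current
# path_info of "/blog/entry",
#
#     url("/about") -> "http://example.com/about"
#     url("edit") -> "http://example.com/blog/edit"
#     url("edit", relative='server') -> "/blog/edit"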
# import _cpconfig last so it can reference other top-level objects
from cherrypy import _cpconfig
# Use _global_conf_alias so quickstart can use 'config' as an arg
# without shadowing cherrypy.config.
config = _global_conf_alias = _cpconfig.Config()
config.defaults = {
'tools.log_tracebacks.on': True,
'tools.log_headers.on': True,
'tools.trailing_slash.on': True,
'tools.encode.on': True
}
config.namespaces["log"] = lambda k, v: setattr(log, k, v)
config.namespaces["checker"] = lambda k, v: setattr(checker, k, v)
# Must reset to get our defaults applied.
config.reset()
from cherrypy import _cpchecker
checker = _cpchecker.Checker()
engine.subscribe('start', checker)
| lgpl-3.0 |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Lib/test/test_xpickle.py | 11 | 7929 | # test_pickle dumps and loads pickles via pickle.py.
# test_cpickle does the same, but via the cPickle module.
# This test covers the other two cases, making pickles with one module and
# loading them via the other. It also tests backwards compatibility with
# previous versions of Python by bouncing pickled objects through Python 2.4
# and Python 2.5 running this file.
import cPickle
import os
import os.path
import pickle
import subprocess
import sys
import types
import unittest
from test import test_support
# Most distro-supplied Pythons don't include the tests
# or test support files, and some don't include a way to get these back even if
# you're willing to install extra packages (like Ubuntu). Doing things like this
# "provides" a pickletester module for older versions of Python that may be
# installed without it. Note that one other design for this involves messing
# with sys.path, which is less precise.
mod_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
"pickletester.py"))
pickletester = types.ModuleType("test.pickletester")
exec compile(open(mod_path).read(), mod_path, 'exec') in pickletester.__dict__
AbstractPickleTests = pickletester.AbstractPickleTests
if pickletester.__name__ in sys.modules:
raise RuntimeError("Did not expect to find test.pickletester loaded")
sys.modules[pickletester.__name__] = pickletester
class DumpCPickle_LoadPickle(AbstractPickleTests):
error = KeyError
def dumps(self, arg, proto=0, fast=False):
# Ignore fast
return cPickle.dumps(arg, proto)
def loads(self, buf):
# Ignore fast
return pickle.loads(buf)
class DumpPickle_LoadCPickle(AbstractPickleTests):
error = cPickle.BadPickleGet
def dumps(self, arg, proto=0, fast=False):
# Ignore fast
return pickle.dumps(arg, proto)
def loads(self, buf):
# Ignore fast
return cPickle.loads(buf)
def have_python_version(name, cache={}):
"""Check whether the given name is a valid Python binary and has
test.test_support.
This respects your PATH.
Args:
name: short string name of a Python binary such as "python2.4".
Returns:
True if the name is valid, False otherwise.
"""
if name not in cache:
cache[name] = os.system(name + ' -c "import test.test_support"') == 0
return cache[name]
class AbstractCompatTests(AbstractPickleTests):
module = None
python = None
error = None
def setUp(self):
self.assertTrue(self.python)
self.assertTrue(self.module)
self.assertTrue(self.error)
test_support.requires("xpickle")
if not have_python_version(self.python):
self.skipTest('%s not available' % self.python)
def send_to_worker(self, python, obj, proto):
"""Bounce a pickled object through another version of Python.
This will pickle the object, send it to a child process where it will be
unpickled, then repickled and sent back to the parent process.
Args:
python: the name of the Python binary to start.
obj: object to pickle.
proto: pickle protocol number to use.
Returns:
The pickled data received from the child process.
"""
# Prevent the subprocess from picking up invalid .pyc files.
target = __file__
if target[-1] in ("c", "o"):
target = target[:-1]
data = self.module.dumps((proto, obj), proto)
worker = subprocess.Popen([python, target, "worker"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = worker.communicate(data)
if worker.returncode != 0:
raise RuntimeError(stderr)
return stdout
def dumps(self, arg, proto=0, fast=False):
return self.send_to_worker(self.python, arg, proto)
def loads(self, input):
return self.module.loads(input)
# These tests are disabled because they require some special setup
# on the worker that's hard to keep in sync.
test_global_ext1 = None
test_global_ext2 = None
test_global_ext4 = None
# This is a cut-down version of pickletester's test_float. Backwards
# compatibility for the values in for_bin_protos was explicitly broken in
# r68903 to fix a bug.
def test_float(self):
for_bin_protos = [4.94e-324, 1e-310]
neg_for_bin_protos = [-x for x in for_bin_protos]
test_values = [0.0, 7e-308, 6.626e-34, 0.1, 0.5,
3.14, 263.44582062374053, 6.022e23, 1e30]
test_proto0_values = test_values + [-x for x in test_values]
test_values = test_proto0_values + for_bin_protos + neg_for_bin_protos
for value in test_proto0_values:
pickle = self.dumps(value, 0)
got = self.loads(pickle)
self.assertEqual(value, got)
for proto in pickletester.protocols[1:]:
for value in test_values:
pickle = self.dumps(value, proto)
got = self.loads(pickle)
self.assertEqual(value, got)
# Backwards compatibility was explicitly broken in r67934 to fix a bug.
test_unicode_high_plane = None
# This tests a fix that's in 2.7 only
test_dynamic_class = None
# This is a cut-down version of pickletester's test_unicode. Backwards
# compatibility was explicitly broken in r67934 to fix a bug.
def test_unicode(self):
if not test_support.have_unicode:
# Python 2.5 has no unittest.skipUnless
self.skipTest('no unicode support')
endcases = [u'', u'<\\u>', u'<\\%c>' % 0x1234, u'<\n>', u'<\\>']
for proto in pickletester.protocols:
for u in endcases:
p = self.dumps(u, proto)
u2 = self.loads(p)
self.assertEqual(u2, u)
# Test backwards compatibility with Python 2.4.
class CPicklePython24Compat(AbstractCompatTests):
module = cPickle
python = "python2.4"
error = cPickle.BadPickleGet
# Disable these tests for Python 2.4. Making them pass would require
# nontrivially monkeypatching the pickletester module in the worker.
test_reduce_calls_base = None
test_reduce_ex_calls_base = None
class PicklePython24Compat(CPicklePython24Compat):
module = pickle
error = KeyError
# Test backwards compatibility with Python 2.5.
class CPicklePython25Compat(AbstractCompatTests):
module = cPickle
python = "python2.5"
error = cPickle.BadPickleGet
class PicklePython25Compat(CPicklePython25Compat):
module = pickle
error = KeyError
# Test backwards compatibility with Python 2.6.
class CPicklePython26Compat(AbstractCompatTests):
module = cPickle
python = "python2.6"
error = cPickle.BadPickleGet
class PicklePython26Compat(CPicklePython26Compat):
module = pickle
error = KeyError
class CPicklePython27Compat(AbstractCompatTests):
module = cPickle
python = "python2.7"
error = cPickle.BadPickleGet
class PicklePython27Compat(CPicklePython27Compat):
module = pickle
error = KeyError
def worker_main(in_stream, out_stream):
message = cPickle.load(in_stream)
protocol, obj = message
cPickle.dump(obj, out_stream, protocol)
def test_main():
test_support.run_unittest(
DumpCPickle_LoadPickle,
DumpPickle_LoadCPickle,
CPicklePython24Compat,
CPicklePython25Compat,
CPicklePython26Compat,
CPicklePython27Compat,
PicklePython24Compat,
PicklePython25Compat,
PicklePython26Compat,
PicklePython27Compat,
)
if __name__ == "__main__":
if "worker" in sys.argv:
worker_main(sys.stdin, sys.stdout)
else:
test_main()
| mit |
cessor/galena | src/galena/stopwords.py | 1 | 1030 | from .corpus import *
from .document import *
import itertools
class Stopwords(object):
def __init__(self, stopwords):
self._stopwords = stopwords
def remove_from(self, words):
for word in words:
if not word:
continue
if word not in self._stopwords:
yield word
class Lexicon(object):
def __init__(self, *iterables):
self._iterables = list(itertools.chain(*iterables))
def __contains__(self, item):
return item in self._iterables
class NltkStopwords(object):
def __iter__(self):
from nltk.corpus import stopwords
return stopwords.words('english').__iter__()
class StopwordsFolder(object):
def __init__(self, directory):
self._directory = directory
def __iter__(self):
for file in self._directory:
for line in Lines(file.content()):
for word in Words(line):
if word:
yield word.strip().lower()
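# Usage sketch (assumes the NLTK stopword corpus has been downloaded):
#
#     lexicon = Lexicon(NltkStopwords(), ['rt', 'lol'])
#     kept = list(Stopwords(lexicon).remove_from(['lol', 'great', 'track']))
#     # kept == ['great', 'track']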
| lgpl-3.0 |
chouseknecht/ansible | lib/ansible/plugins/inventory/docker_swarm.py | 37 | 11922 | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Stefan Heitmueller <stefan.heitmueller@gmx.com>
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: docker_swarm
plugin_type: inventory
version_added: '2.8'
author:
- Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
short_description: Ansible dynamic inventory plugin for Docker swarm nodes.
requirements:
- python >= 2.7
- L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0
extends_documentation_fragment:
- constructed
description:
- Reads inventories from the Docker swarm API.
- Uses a YAML configuration file docker_swarm.[yml|yaml].
- "The plugin returns following groups of swarm nodes: I(all) - all hosts; I(workers) - all worker nodes;
I(managers) - all manager nodes; I(leader) - the swarm leader node;
I(nonleaders) - all nodes except the swarm leader."
options:
plugin:
            description: The name of this plugin; it should always be set to C(docker_swarm) for this plugin to
                recognize it as its own.
type: str
required: true
            choices: [ docker_swarm ]
docker_host:
description:
- Socket of a Docker swarm manager node (C(tcp), C(unix)).
- "Use C(unix://var/run/docker.sock) to connect via local socket."
type: str
required: true
aliases: [ docker_url ]
verbose_output:
            description: Toggle to (not) include all available node metadata (e.g. C(Platform), C(Architecture), C(OS),
C(EngineVersion))
type: bool
default: yes
tls:
description: Connect using TLS without verifying the authenticity of the Docker host server.
type: bool
default: no
validate_certs:
description: Toggle if connecting using TLS with or without verifying the authenticity of the Docker
host server.
type: bool
default: no
aliases: [ tls_verify ]
client_key:
description: Path to the client's TLS key file.
type: path
aliases: [ tls_client_key, key_path ]
ca_cert:
description: Use a CA certificate when performing server verification by providing the path to a CA
certificate file.
type: path
aliases: [ tls_ca_cert, cacert_path ]
client_cert:
description: Path to the client's TLS certificate file.
type: path
aliases: [ tls_client_cert, cert_path ]
tls_hostname:
description: When verifying the authenticity of the Docker host server, provide the expected name of
the server.
type: str
ssl_version:
description: Provide a valid SSL version number. Default value determined by ssl.py module.
type: str
api_version:
description:
- The version of the Docker API running on the Docker Host.
- Defaults to the latest version of the API supported by docker-py.
type: str
aliases: [ docker_api_version ]
timeout:
description:
- The maximum amount of time in seconds to wait on a response from the API.
- If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT)
will be used instead. If the environment variable is not set, the default value will be used.
type: int
default: 60
aliases: [ time_out ]
include_host_uri:
description: Toggle to return the additional attribute C(ansible_host_uri) which contains the URI of the
                swarm leader in the format C(tcp://172.16.0.1:2376). This value may be used without additional
                modification as the value of the I(docker_host) option in Docker Swarm modules when connecting via the API.
The port always defaults to C(2376).
type: bool
default: no
include_host_uri_port:
description: Override the detected port number included in I(ansible_host_uri)
type: int
'''
EXAMPLES = '''
# Minimal example using local docker
plugin: docker_swarm
docker_host: unix://var/run/docker.sock
# Minimal example using remote docker
plugin: docker_swarm
docker_host: tcp://my-docker-host:2375
# Example using remote docker with unverified TLS
plugin: docker_swarm
docker_host: tcp://my-docker-host:2376
tls: yes
# Example using remote docker with verified TLS and client certificate verification
plugin: docker_swarm
docker_host: tcp://my-docker-host:2376
validate_certs: yes
ca_cert: /somewhere/ca.pem
client_key: /somewhere/key.pem
client_cert: /somewhere/cert.pem
# Example using constructed features to create groups and set ansible_host
plugin: docker_swarm
docker_host: tcp://my-docker-host:2375
strict: False
keyed_groups:
# add e.g. x86_64 hosts to an arch_x86_64 group
- prefix: arch
key: 'Description.Platform.Architecture'
# add e.g. linux hosts to an os_linux group
- prefix: os
key: 'Description.Platform.OS'
# create a group per node label
# e.g. a node labeled w/ "production" ends up in group "label_production"
# hint: labels containing special characters will be converted to safe names
- key: 'Spec.Labels'
prefix: label
'''
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native
from ansible.module_utils.docker.common import update_tls_hostname, get_connect_params
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
from ansible.parsing.utils.addresses import parse_address
try:
import docker
HAS_DOCKER = True
except ImportError:
HAS_DOCKER = False
class InventoryModule(BaseInventoryPlugin, Constructable):
''' Host inventory parser for ansible using Docker swarm as source. '''
NAME = 'docker_swarm'
def _fail(self, msg):
raise AnsibleError(msg)
def _populate(self):
raw_params = dict(
docker_host=self.get_option('docker_host'),
tls=self.get_option('tls'),
tls_verify=self.get_option('validate_certs'),
key_path=self.get_option('client_key'),
cacert_path=self.get_option('ca_cert'),
cert_path=self.get_option('client_cert'),
tls_hostname=self.get_option('tls_hostname'),
api_version=self.get_option('api_version'),
timeout=self.get_option('timeout'),
ssl_version=self.get_option('ssl_version'),
debug=None,
)
update_tls_hostname(raw_params)
connect_params = get_connect_params(raw_params, fail_function=self._fail)
self.client = docker.DockerClient(**connect_params)
self.inventory.add_group('all')
self.inventory.add_group('manager')
self.inventory.add_group('worker')
self.inventory.add_group('leader')
self.inventory.add_group('nonleaders')
if self.get_option('include_host_uri'):
if self.get_option('include_host_uri_port'):
host_uri_port = str(self.get_option('include_host_uri_port'))
elif self.get_option('tls') or self.get_option('validate_certs'):
host_uri_port = '2376'
else:
host_uri_port = '2375'
try:
self.nodes = self.client.nodes.list()
for self.node in self.nodes:
self.node_attrs = self.client.nodes.get(self.node.id).attrs
self.inventory.add_host(self.node_attrs['ID'])
self.inventory.add_host(self.node_attrs['ID'], group=self.node_attrs['Spec']['Role'])
self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host',
self.node_attrs['Status']['Addr'])
if self.get_option('include_host_uri'):
self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
'tcp://' + self.node_attrs['Status']['Addr'] + ':' + host_uri_port)
if self.get_option('verbose_output'):
self.inventory.set_variable(self.node_attrs['ID'], 'docker_swarm_node_attributes', self.node_attrs)
if 'ManagerStatus' in self.node_attrs:
if self.node_attrs['ManagerStatus'].get('Leader'):
                            # This is a workaround for a bug in Docker where in some cases the Leader IP is 0.0.0.0
# Check moby/moby#35437 for details
swarm_leader_ip = parse_address(self.node_attrs['ManagerStatus']['Addr'])[0] or \
self.node_attrs['Status']['Addr']
if self.get_option('include_host_uri'):
self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
'tcp://' + swarm_leader_ip + ':' + host_uri_port)
self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host', swarm_leader_ip)
self.inventory.add_host(self.node_attrs['ID'], group='leader')
else:
self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
else:
self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
# Use constructed if applicable
strict = self.get_option('strict')
# Composed variables
self._set_composite_vars(self.get_option('compose'),
self.node_attrs,
self.node_attrs['ID'],
strict=strict)
# Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
self._add_host_to_composed_groups(self.get_option('groups'),
self.node_attrs,
self.node_attrs['ID'],
strict=strict)
# Create groups based on variable values and add the corresponding hosts to it
self._add_host_to_keyed_groups(self.get_option('keyed_groups'),
self.node_attrs,
self.node_attrs['ID'],
strict=strict)
except Exception as e:
raise AnsibleError('Unable to fetch hosts from Docker swarm API, this was the original exception: %s' %
to_native(e))
def verify_file(self, path):
"""Return the possibly of a file being consumable by this plugin."""
return (
super(InventoryModule, self).verify_file(path) and
path.endswith((self.NAME + '.yaml', self.NAME + '.yml')))
def parse(self, inventory, loader, path, cache=True):
if not HAS_DOCKER:
raise AnsibleError('The Docker swarm dynamic inventory plugin requires the Docker SDK for Python: '
'https://github.com/docker/docker-py.')
super(InventoryModule, self).parse(inventory, loader, path, cache)
self._read_config_data(path)
self._populate()
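# Invocation sketch (standard Ansible CLI; the inventory file name must end
# in docker_swarm.yml or docker_swarm.yaml for verify_file() above to accept
# it):
#
#     ansible-inventory -i docker_swarm.yml --list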
| gpl-3.0 |
deejross/python3-pywbem | lex.py | 1 | 39948 | # -----------------------------------------------------------------------------
# ply: lex.py
#
# Author: David M. Beazley (dave@dabeaz.com)
#
# Copyright (C) 2001-2009, David M. Beazley
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See the file COPYING for a complete copy of the LGPL.
# -----------------------------------------------------------------------------
__version__ = "3.0"
__tabversion__ = "3.0" # Version of table file used
import re, sys, types, copy, os
# This tuple contains known string types
try:
# Python 2.6
StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
# Python 3.0
StringTypes = (str, bytes)
# Extract the code attribute of a function. Different implementations
# are for Python 2/3 compatibility.
if sys.version_info[0] < 3:
def func_code(f):
return f.func_code
else:
def func_code(f):
return f.__code__
# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
def __init__(self,message,s):
self.args = (message,)
self.text = s
# Token class. This class is used to represent the tokens produced.
class LexToken(object):
def __str__(self):
return "LexToken(%s,%r,%d,%d)" % (self.type,self.value,self.lineno,self.lexpos)
def __repr__(self):
return str(self)
# This object is a stand-in for a logging object created by the
# logging module.
class PlyLogger(object):
def __init__(self,f):
self.f = f
def critical(self,msg,*args,**kwargs):
self.f.write((msg % args) + "\n")
def warning(self,msg,*args,**kwargs):
self.f.write("WARNING: "+ (msg % args) + "\n")
def error(self,msg,*args,**kwargs):
self.f.write("ERROR: " + (msg % args) + "\n")
info = critical
debug = critical
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self,name):
return self
def __call__(self,*args,**kwargs):
return self
# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
# input() - Store a new string in the lexer
# token() - Get the next token
# clone() - Clone the lexer
#
# lineno - Current line number
# lexpos - Current position in the input string
# -----------------------------------------------------------------------------
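# Typical runtime usage (sketch; assumes token rules are defined in the
# calling module and the lexer has been built with lex()):
#
#     lexer.input("3 + 4 * 10")
#     for tok in lexer: # iteration wraps token()
#         print(tok.type, tok.value, tok.lineno, tok.lexpos)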
class Lexer:
def __init__(self):
self.lexre = None # Master regular expression. This is a list of
# tuples (re,findex) where re is a compiled
# regular expression and findex is a list
# mapping regex group numbers to rules
self.lexretext = None # Current regular expression strings
self.lexstatere = {} # Dictionary mapping lexer states to master regexs
self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names
self.lexstate = "INITIAL" # Current lexer state
self.lexstatestack = [] # Stack of lexer states
self.lexstateinfo = None # State information
self.lexstateignore = {} # Dictionary of ignored characters for each state
self.lexstateerrorf = {} # Dictionary of error functions for each state
self.lexreflags = 0 # Optional re compile flags
self.lexdata = None # Actual input data (as a string)
self.lexpos = 0 # Current position in input text
self.lexlen = 0 # Length of the input text
self.lexerrorf = None # Error rule (if any)
self.lextokens = None # List of valid tokens
self.lexignore = "" # Ignored characters
self.lexliterals = "" # Literal characters that can be passed through
self.lexmodule = None # Module
self.lineno = 1 # Current line number
self.lexoptimize = 0 # Optimized mode
def clone(self,object=None):
c = copy.copy(self)
# If the object parameter has been supplied, it means we are attaching the
# lexer to a new object. In this case, we have to rebind all methods in
# the lexstatere and lexstateerrorf tables.
if object:
newtab = { }
for key, ritem in self.lexstatere.items():
newre = []
for cre, findex in ritem:
newfindex = []
for f in findex:
if not f or not f[0]:
newfindex.append(f)
continue
newfindex.append((getattr(object,f[0].__name__),f[1]))
newre.append((cre,newfindex))
newtab[key] = newre
c.lexstatere = newtab
c.lexstateerrorf = { }
for key, ef in self.lexstateerrorf.items():
c.lexstateerrorf[key] = getattr(object,ef.__name__)
c.lexmodule = object
return c
# ------------------------------------------------------------
# writetab() - Write lexer information to a table file
# ------------------------------------------------------------
def writetab(self,tabfile,outputdir=""):
if isinstance(tabfile,types.ModuleType):
return
basetabfilename = tabfile.split(".")[-1]
filename = os.path.join(outputdir,basetabfilename)+".py"
tf = open(filename,"w")
tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
tf.write("_tabversion = %s\n" % repr(__version__))
tf.write("_lextokens = %s\n" % repr(self.lextokens))
tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))
tabre = { }
# Collect all functions in the initial state
initial = self.lexstatere["INITIAL"]
initialfuncs = []
for part in initial:
for f in part[1]:
if f and f[0]:
initialfuncs.append(f)
for key, lre in self.lexstatere.items():
titem = []
for i in range(len(lre)):
titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1],self.lexstaterenames[key][i])))
tabre[key] = titem
tf.write("_lexstatere = %s\n" % repr(tabre))
tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))
taberr = { }
for key, ef in self.lexstateerrorf.items():
if ef:
taberr[key] = ef.__name__
else:
taberr[key] = None
tf.write("_lexstateerrorf = %s\n" % repr(taberr))
tf.close()
# ------------------------------------------------------------
# readtab() - Read lexer information from a tab file
# ------------------------------------------------------------
def readtab(self,tabfile,fdict):
if isinstance(tabfile,types.ModuleType):
lextab = tabfile
else:
if sys.version_info[0] < 3:
exec("import %s as lextab" % tabfile)
else:
env = { }
exec("import %s as lextab" % tabfile, env,env)
lextab = env['lextab']
if getattr(lextab,"_tabversion","0.0") != __version__:
raise ImportError("Inconsistent PLY version")
self.lextokens = lextab._lextokens
self.lexreflags = lextab._lexreflags
self.lexliterals = lextab._lexliterals
self.lexstateinfo = lextab._lexstateinfo
self.lexstateignore = lextab._lexstateignore
self.lexstatere = { }
self.lexstateretext = { }
for key,lre in lextab._lexstatere.items():
titem = []
txtitem = []
for i in range(len(lre)):
titem.append((re.compile(lre[i][0],lextab._lexreflags),_names_to_funcs(lre[i][1],fdict)))
txtitem.append(lre[i][0])
self.lexstatere[key] = titem
self.lexstateretext[key] = txtitem
self.lexstateerrorf = { }
for key,ef in lextab._lexstateerrorf.items():
self.lexstateerrorf[key] = fdict[ef]
self.begin('INITIAL')
# ------------------------------------------------------------
# input() - Push a new string into the lexer
# ------------------------------------------------------------
def input(self,s):
# Pull off the first character to see if s looks like a string
c = s[:1]
if not isinstance(c,StringTypes):
raise ValueError("Expected a string")
self.lexdata = s
self.lexpos = 0
self.lexlen = len(s)
# ------------------------------------------------------------
# begin() - Changes the lexing state
# ------------------------------------------------------------
def begin(self,state):
if not state in self.lexstatere:
raise ValueError("Undefined state")
self.lexre = self.lexstatere[state]
self.lexretext = self.lexstateretext[state]
self.lexignore = self.lexstateignore.get(state,"")
self.lexerrorf = self.lexstateerrorf.get(state,None)
self.lexstate = state
# ------------------------------------------------------------
# push_state() - Changes the lexing state and saves old on stack
# ------------------------------------------------------------
def push_state(self,state):
self.lexstatestack.append(self.lexstate)
self.begin(state)
# ------------------------------------------------------------
# pop_state() - Restores the previous state
# ------------------------------------------------------------
def pop_state(self):
self.begin(self.lexstatestack.pop())
# ------------------------------------------------------------
# current_state() - Returns the current lexing state
# ------------------------------------------------------------
def current_state(self):
return self.lexstate
# ------------------------------------------------------------
# skip() - Skip ahead n characters
# ------------------------------------------------------------
def skip(self,n):
self.lexpos += n
# ------------------------------------------------------------
    # token() - Return the next token from the Lexer
#
# Note: This function has been carefully implemented to be as fast
# as possible. Don't make changes unless you really know what
# you are doing
# ------------------------------------------------------------
def token(self):
# Make local copies of frequently referenced attributes
lexpos = self.lexpos
lexlen = self.lexlen
lexignore = self.lexignore
lexdata = self.lexdata
while lexpos < lexlen:
# This code provides some short-circuit code for whitespace, tabs, and other ignored characters
if lexdata[lexpos] in lexignore:
lexpos += 1
continue
# Look for a regular expression match
for lexre,lexindexfunc in self.lexre:
m = lexre.match(lexdata,lexpos)
if not m: continue
# Create a token for return
tok = LexToken()
tok.value = m.group()
tok.lineno = self.lineno
tok.lexpos = lexpos
i = m.lastindex
func,tok.type = lexindexfunc[i]
if not func:
# If no token type was set, it's an ignored token
if tok.type:
self.lexpos = m.end()
return tok
else:
lexpos = m.end()
break
lexpos = m.end()
# If token is processed by a function, call it
tok.lexer = self # Set additional attributes useful in token rules
self.lexmatch = m
self.lexpos = lexpos
newtok = func(tok)
                # Every function must return a token; if it returns nothing, we just move to the next token
if not newtok:
lexpos = self.lexpos # This is here in case user has updated lexpos.
lexignore = self.lexignore # This is here in case there was a state change
break
# Verify type of the token. If not in the token map, raise an error
if not self.lexoptimize:
if not newtok.type in self.lextokens:
raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
func_code(func).co_filename, func_code(func).co_firstlineno,
func.__name__, newtok.type),lexdata[lexpos:])
return newtok
else:
# No match, see if in literals
if lexdata[lexpos] in self.lexliterals:
tok = LexToken()
tok.value = lexdata[lexpos]
tok.lineno = self.lineno
tok.type = tok.value
tok.lexpos = lexpos
self.lexpos = lexpos + 1
return tok
# No match. Call t_error() if defined.
if self.lexerrorf:
tok = LexToken()
tok.value = self.lexdata[lexpos:]
tok.lineno = self.lineno
tok.type = "error"
tok.lexer = self
tok.lexpos = lexpos
self.lexpos = lexpos
newtok = self.lexerrorf(tok)
if lexpos == self.lexpos:
# Error method didn't change text position at all. This is an error.
raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
lexpos = self.lexpos
if not newtok: continue
return newtok
self.lexpos = lexpos
raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
self.lexpos = lexpos + 1
if self.lexdata is None:
raise RuntimeError("No input string given with input()")
return None
# Iterator interface
def __iter__(self):
return self
def next(self):
t = self.token()
if t is None:
raise StopIteration
return t
__next__ = next
# -----------------------------------------------------------------------------
# === Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
try:
raise RuntimeError
except RuntimeError:
e,b,t = sys.exc_info()
f = t.tb_frame
while levels > 0:
f = f.f_back
levels -= 1
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist,namelist):
result = []
for f,name in zip(funclist,namelist):
if f and f[0]:
result.append((name, f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist,fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]],n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module (notably its cap on the number of capture groups), it may be
# necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist,reflags,ldict,toknames):
if not relist: return []
regex = "|".join(relist)
try:
lexre = re.compile(regex,re.VERBOSE | reflags)
# Build the index to function map for the matching engine
lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
lexindexnames = lexindexfunc[:]
for f,i in lexre.groupindex.items():
handle = ldict.get(f,None)
if type(handle) in (types.FunctionType, types.MethodType):
lexindexfunc[i] = (handle,toknames[f])
lexindexnames[i] = f
elif handle is not None:
lexindexnames[i] = f
if f.find("ignore_") > 0:
lexindexfunc[i] = (None,None)
else:
lexindexfunc[i] = (None, toknames[f])
return [(lexre,lexindexfunc)],[regex],[lexindexnames]
except Exception:
m = int(len(relist)/2)
if m == 0: m = 1
llist, lre, lnames = _form_master_re(relist[:m],reflags,ldict,toknames)
rlist, rre, rnames = _form_master_re(relist[m:],reflags,ldict,toknames)
return llist+rlist, lre+rre, lnames+rnames
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s,names):
nonstate = 1
parts = s.split("_")
for i in range(1,len(parts)):
if not parts[i] in names and parts[i] != 'ANY': break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names)
tokenname = "_".join(parts[i:])
return (states,tokenname)
# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
def __init__(self,ldict,log=None,reflags=0):
self.ldict = ldict
self.error_func = None
self.tokens = []
self.reflags = reflags
self.stateinfo = { 'INITIAL' : 'inclusive'}
self.files = {}
self.error = 0
if log is None:
self.log = PlyLogger(sys.stderr)
else:
self.log = log
# Get all of the basic information
def get_all(self):
self.get_tokens()
self.get_literals()
self.get_states()
self.get_rules()
# Validate all of the information
def validate_all(self):
self.validate_tokens()
self.validate_literals()
self.validate_rules()
return self.error
# Get the tokens map
def get_tokens(self):
tokens = self.ldict.get("tokens",None)
if not tokens:
self.log.error("No token list is defined")
self.error = 1
return
if not isinstance(tokens,(list, tuple)):
self.log.error("tokens must be a list or tuple")
self.error = 1
return
if not tokens:
self.log.error("tokens is empty")
self.error = 1
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
terminals = {}
for n in self.tokens:
if not _is_identifier.match(n):
self.log.error("Bad token name '%s'",n)
self.error = 1
if n in terminals:
self.log.warning("Token '%s' multiply defined", n)
terminals[n] = 1
# Get the literals specifier
def get_literals(self):
self.literals = self.ldict.get("literals","")
# Validate literals
def validate_literals(self):
try:
for c in self.literals:
if not isinstance(c,StringTypes) or len(c) > 1:
self.log.error("Invalid literal %s. Must be a single character", repr(c))
self.error = 1
continue
except TypeError:
self.log.error("Invalid literals specification. literals must be a sequence of characters")
self.error = 1
def get_states(self):
self.states = self.ldict.get("states",None)
# Build statemap
if self.states:
if not isinstance(self.states,(tuple,list)):
self.log.error("states must be defined as a tuple or list")
self.error = 1
else:
for s in self.states:
if not isinstance(s,tuple) or len(s) != 2:
self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')",repr(s))
self.error = 1
continue
name, statetype = s
if not isinstance(name,StringTypes):
self.log.error("State name %s must be a string", repr(name))
self.error = 1
continue
if not (statetype == 'inclusive' or statetype == 'exclusive'):
self.log.error("State type for state %s must be 'inclusive' or 'exclusive'",name)
self.error = 1
continue
if name in self.stateinfo:
self.log.error("State '%s' already defined",name)
self.error = 1
continue
self.stateinfo[name] = statetype
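# A hedged example of a declaration this accepts (state names are illustrative):
#   states = (('ccode', 'exclusive'), ('comment', 'inclusive'))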
# Get all of the symbols with a t_ prefix and sort them into various
# categories (functions, strings, error functions, and ignore characters)
def get_rules(self):
tsymbols = [f for f in self.ldict if f[:2] == 't_' ]
# Now build up a list of functions and a list of strings
self.toknames = { } # Mapping of symbols to token names
self.funcsym = { } # Symbols defined as functions
self.strsym = { } # Symbols defined as strings
self.ignore = { } # Ignore strings by state
self.errorf = { } # Error functions by state
for s in self.stateinfo:
self.funcsym[s] = []
self.strsym[s] = []
if len(tsymbols) == 0:
self.log.error("No rules of the form t_rulename are defined")
self.error = 1
return
for f in tsymbols:
t = self.ldict[f]
states, tokname = _statetoken(f,self.stateinfo)
self.toknames[f] = tokname
if hasattr(t,"__call__"):
if tokname == 'error':
for s in states:
self.errorf[s] = t
elif tokname == 'ignore':
line = func_code(t).co_firstlineno
file = func_code(t).co_filename
self.log.error("%s:%d: Rule '%s' must be defined as a string",file,line,t.__name__)
self.error = 1
else:
for s in states:
self.funcsym[s].append((f,t))
elif isinstance(t, StringTypes):
if tokname == 'ignore':
for s in states:
self.ignore[s] = t
if "\\" in t:
self.log.warning("%s contains a literal backslash '\\'",f)
elif tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", f)
self.error = 1
else:
for s in states:
self.strsym[s].append((f,t))
else:
self.log.error("%s not defined as a function or string", f)
self.error = 1
# Sort the functions by line number
for f in self.funcsym.values():
if sys.version_info[0] < 3:
f.sort(lambda x,y: cmp(func_code(x[1]).co_firstlineno,func_code(y[1]).co_firstlineno))
else:
# Python 3.0
f.sort(key=lambda x: func_code(x[1]).co_firstlineno)
# Sort the strings by regular expression length
for s in self.strsym.values():
if sys.version_info[0] < 3:
s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
else:
# Python 3.0
s.sort(key=lambda x: len(x[1]),reverse=True)
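# (Longer string patterns are placed first in the master regex so that, e.g.,
# '==' is tried before '=' and longest-match behavior is preserved.)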
# Validate all of the t_rules collected
def validate_rules(self):
for state in self.stateinfo:
# Validate all rules defined by functions
for fname, f in self.funcsym[state]:
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
self.files[file] = 1
tokname = self.toknames[fname]
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = func_code(f).co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
self.error = 1
continue
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
self.error = 1
continue
if not f.__doc__:
self.log.error("%s:%d: No regular expression defined for rule '%s'",file,line,f.__name__)
self.error = 1
continue
try:
c = re.compile("(?P<%s>%s)" % (fname,f.__doc__), re.VERBOSE | self.reflags)
if c.match(""):
self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file,line,f.__name__)
self.error = 1
except re.error:
_etype, e, _etrace = sys.exc_info()
self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file,line,f.__name__,e)
if '#' in f.__doc__:
self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'",file,line, f.__name__)
self.error = 1
# Validate all rules defined by strings
for name,r in self.strsym[state]:
tokname = self.toknames[name]
if tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", name)
self.error = 1
continue
if not tokname in self.tokens and tokname.find("ignore_") < 0:
self.log.error("Rule '%s' defined for an unspecified token %s",name,tokname)
self.error = 1
continue
try:
c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | self.reflags)
if (c.match("")):
self.log.error("Regular expression for rule '%s' matches empty string",name)
self.error = 1
except re.error:
_etype, e, _etrace = sys.exc_info()
self.log.error("Invalid regular expression for rule '%s'. %s",name,e)
if '#' in r:
self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'",name)
self.error = 1
if not self.funcsym[state] and not self.strsym[state]:
self.log.error("No rules defined for state '%s'",state)
self.error = 1
# Validate the error function
efunc = self.errorf.get(state,None)
if efunc:
f = efunc
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
self.files[file] = 1
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = func_code(f).co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
self.error = 1
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
self.error = 1
for f in self.files:
self.validate_file(f)
# -----------------------------------------------------------------------------
# validate_file()
#
# This checks to see if there are duplicated t_rulename() functions or strings
# in the lexer input file. This is done using a simple regular expression
# match on each line in the given file.
# -----------------------------------------------------------------------------
def validate_file(self,filename):
import os.path
base,ext = os.path.splitext(filename)
if ext != '.py': return # No idea what the file is. Return OK
try:
f = open(filename)
lines = f.readlines()
f.close()
except IOError:
return # Couldn't find the file. Don't worry about it
fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
counthash = { }
linen = 1
for l in lines:
m = fre.match(l)
if not m:
m = sre.match(l)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
self.log.error("%s:%d: Rule %s redefined. Previously defined on line %d",filename,linen,name,prev)
self.error = 1
linen += 1
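# For example, a module that contains both of the following (illustrative
# names only) would be reported as a redefinition of t_NUMBER:
#   def t_NUMBER(t): ...
#   t_NUMBER = r'\d+'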
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):
global lexer
ldict = None
stateinfo = { 'INITIAL' : 'inclusive'}
lexobj = Lexer()
lexobj.lexoptimize = optimize
global token,input
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
if debug:
if debuglog is None:
debuglog = PlyLogger(sys.stderr)
# Get the module dictionary used for the lexer
if object: module = object
if module:
_items = [(k,getattr(module,k)) for k in dir(module)]
ldict = dict(_items)
else:
ldict = get_caller_module_dict(2)
# Collect parser information from the dictionary
linfo = LexerReflect(ldict,log=errorlog,reflags=reflags)
linfo.get_all()
if not optimize:
if linfo.validate_all():
raise SyntaxError("Can't build lexer")
if optimize and lextab:
try:
lexobj.readtab(lextab,ldict)
token = lexobj.token
input = lexobj.input
lexer = lexobj
return lexobj
except ImportError:
pass
# Dump some basic debugging information
if debug:
debuglog.info("lex: tokens = %r", linfo.tokens)
debuglog.info("lex: literals = %r", linfo.literals)
debuglog.info("lex: states = %r", linfo.stateinfo)
# Build a dictionary of valid token names
lexobj.lextokens = { }
for n in linfo.tokens:
lexobj.lextokens[n] = 1
# Get literals specification
if isinstance(linfo.literals,(list,tuple)):
lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
else:
lexobj.lexliterals = linfo.literals
# Get the stateinfo dictionary
stateinfo = linfo.stateinfo
regexs = { }
# Build the master regular expressions
for state in stateinfo:
regex_list = []
# Add rules defined by functions first
for fname, f in linfo.funcsym[state]:
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
regex_list.append("(?P<%s>%s)" % (fname,f.__doc__))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",fname,f.__doc__, state)
# Now add all of the simple rules
for name,r in linfo.strsym[state]:
regex_list.append("(?P<%s>%s)" % (name,r))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",name,r, state)
regexs[state] = regex_list
# Build the master regular expressions
if debug:
debuglog.info("lex: ==== MASTER REGEXS FOLLOW ====")
for state in regexs:
lexre, re_text, re_names = _form_master_re(regexs[state],reflags,ldict,linfo.toknames)
lexobj.lexstatere[state] = lexre
lexobj.lexstateretext[state] = re_text
lexobj.lexstaterenames[state] = re_names
if debug:
for i in range(len(re_text)):
debuglog.info("lex: state '%s' : regex[%d] = '%s'",state, i, re_text[i])
# For inclusive states, we need to add the regular expressions from the INITIAL state
for state,stype in stateinfo.items():
if state != "INITIAL" and stype == 'inclusive':
lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
lexobj.lexstateinfo = stateinfo
lexobj.lexre = lexobj.lexstatere["INITIAL"]
lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
# Set up ignore variables
lexobj.lexstateignore = linfo.ignore
lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")
# Set up error functions
lexobj.lexstateerrorf = linfo.errorf
lexobj.lexerrorf = linfo.errorf.get("INITIAL",None)
if not lexobj.lexerrorf:
errorlog.warning("No t_error rule is defined")
# Check state information for ignore and error rules
for s,stype in stateinfo.items():
if stype == 'exclusive':
if not s in linfo.errorf:
errorlog.warning("No error rule is defined for exclusive state '%s'", s)
if not s in linfo.ignore and lexobj.lexignore:
errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
elif stype == 'inclusive':
if not s in linfo.errorf:
linfo.errorf[s] = linfo.errorf.get("INITIAL",None)
if not s in linfo.ignore:
linfo.ignore[s] = linfo.ignore.get("INITIAL","")
# Create global versions of the token() and input() functions
token = lexobj.token
input = lexobj.input
lexer = lexobj
# If in optimize mode, we write the lextab
if lextab and optimize:
lexobj.writetab(lextab,outputdir)
return lexobj
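# A minimal usage sketch (hedged; the token names and rules below are purely
# illustrative and not part of this module):
#
#   tokens = ('NUMBER', 'PLUS')
#   t_PLUS = r'\+'
#   t_ignore = ' \t'
#   def t_NUMBER(t):
#       r'\d+'
#       t.value = int(t.value)
#       return t
#   def t_error(t):
#       t.lexer.skip(1)
#   lexer = lex()
#   lexer.input("1 + 2")
#   for tok in iter(lexer.token, None):
#       print(tok.type, tok.value)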
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None,data=None):
if not data:
try:
filename = sys.argv[1]
f = open(filename)
data = f.read()
f.close()
except IndexError:
sys.stdout.write("Reading from standard input (type EOF to end):\n")
data = sys.stdin.read()
if lexer:
_input = lexer.input
else:
_input = input
_input(data)
if lexer:
_token = lexer.token
else:
_token = token
while 1:
tok = _token()
if not tok: break
sys.stdout.write("(%s,%r,%d,%d)\n" % (tok.type, tok.value, tok.lineno,tok.lexpos))
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function attaches a regular expression to a rule function by
# setting its docstring, which is useful when the pattern must be computed or
# composed rather than written as a literal docstring
# -----------------------------------------------------------------------------
def TOKEN(r):
def set_doc(f):
if hasattr(r,"__call__"):
f.__doc__ = r.__doc__
else:
f.__doc__ = r
return f
return set_doc
# Alternative spelling of the TOKEN decorator
Token = TOKEN
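# A hedged usage sketch (the identifier pattern below is illustrative):
#
#   digit      = r'([0-9])'
#   nondigit   = r'([_A-Za-z])'
#   identifier = r'(' + nondigit + r'(' + digit + r'|' + nondigit + r')*)'
#
#   @TOKEN(identifier)
#   def t_ID(t):
#       return t    # t.type is 'ID'; the regex came from the decorator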
| lgpl-2.1 |
samueldotj/TeeRISC-Simulator | src/arch/x86/isa/insts/simd128/integer/shift/right_logical_shift.py | 91 | 4565 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop PSRLW_XMM_XMM {
msrl xmmh, xmmh, xmmlm, size=2, ext=0
msrl xmml, xmml, xmmlm, size=2, ext=0
};
def macroop PSRLW_XMM_M {
ldfp ufp1, seg, sib, disp, dataSize=8
msrl xmml, xmml, ufp1, size=2, ext=0
msrl xmmh, xmmh, ufp1, size=2, ext=0
};
def macroop PSRLW_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
msrl xmml, xmml, ufp1, size=2, ext=0
msrl xmmh, xmmh, ufp1, size=2, ext=0
};
def macroop PSRLW_XMM_I {
msrli xmml, xmml, imm, size=2, ext=0
msrli xmmh, xmmh, imm, size=2, ext=0
};
def macroop PSRLD_XMM_XMM {
msrl xmmh, xmmh, xmmlm, size=4, ext=0
msrl xmml, xmml, xmmlm, size=4, ext=0
};
def macroop PSRLD_XMM_M {
ldfp ufp1, seg, sib, disp, dataSize=8
msrl xmml, xmml, ufp1, size=4, ext=0
msrl xmmh, xmmh, ufp1, size=4, ext=0
};
def macroop PSRLD_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
msrl xmml, xmml, ufp1, size=4, ext=0
msrl xmmh, xmmh, ufp1, size=4, ext=0
};
def macroop PSRLD_XMM_I {
msrli xmml, xmml, imm, size=4, ext=0
msrli xmmh, xmmh, imm, size=4, ext=0
};
def macroop PSRLQ_XMM_XMM {
msrl xmmh, xmmh, xmmlm, size=8, ext=0
msrl xmml, xmml, xmmlm, size=8, ext=0
};
def macroop PSRLQ_XMM_M {
ldfp ufp1, seg, sib, disp, dataSize=8
msrl xmml, xmml, ufp1, size=8, ext=0
msrl xmmh, xmmh, ufp1, size=8, ext=0
};
def macroop PSRLQ_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
msrl xmml, xmml, ufp1, size=8, ext=0
msrl xmmh, xmmh, ufp1, size=8, ext=0
};
def macroop PSRLQ_XMM_I {
msrli xmml, xmml, imm, size=8, ext=0
msrli xmmh, xmmh, imm, size=8, ext=0
};
def macroop PSRLDQ_XMM_I {
limm t2, 8
subi t1, t2, imm, flags=(ECF,), dataSize=1
br label("psrldq_less_8"), flags=(nCECF,)
# Greater than 8
limm t2, 16
subi t1, t2, imm, flags=(ECF,), dataSize=1
br label("psrldq_less_16"), flags=(nCECF,)
# Greater than 16
lfpimm xmml, 0
lfpimm xmmh, 0
br label("psrldq_end")
psrldq_less_16:
# Between 8 and 16
msrli xmml, xmmh, "(IMMEDIATE-8)<<3", size=8, ext=0
lfpimm xmmh, 0
br label("psrldq_end")
psrldq_less_8:
# Less than 8
mslli ufp1, xmmh, "(8-IMMEDIATE) << 3", size=8, ext=0
msrli xmml, xmml, "IMMEDIATE << 3", size=8, ext=0
msrli xmmh, xmmh, "IMMEDIATE << 3", size=8, ext=0
mor xmml, xmml, ufp1
psrldq_end:
fault "NoFault"
};
'''
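# A hedged Python reference model of the PSRLDQ decomposition above (an
# illustration only, not part of the simulator): shift a 128-bit value held
# as two 64-bit halves right by `imm` bytes.
#
#   def psrldq_model(xmmh, xmml, imm):
#       mask = (1 << 64) - 1
#       if imm > 16:
#           return 0, 0                                   # fully shifted out
#       if imm > 8:
#           return 0, (xmmh >> ((imm - 8) * 8)) & mask    # only high half left
#       low = ((xmml >> (imm * 8)) | (xmmh << ((8 - imm) * 8))) & mask
#       return (xmmh >> (imm * 8)) & mask, low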
| bsd-3-clause |
joernhees/scikit-learn | examples/datasets/plot_random_multilabel_dataset.py | 278 | 3402 | """
==============================================
Plot randomly generated multilabel dataset
==============================================
This illustrates the `datasets.make_multilabel_classification` dataset
generator. Each sample consists of counts of two features (up to 50 in
total), which are differently distributed in each of two classes.
Points are labeled as follows, where Y means the class is present:
=====  =====  =====  ======
  1      2      3    Color
=====  =====  =====  ======
  Y      N      N    Red
  N      Y      N    Blue
  N      N      Y    Yellow
  Y      Y      N    Purple
  Y      N      Y    Orange
  N      Y      Y    Green
  Y      Y      Y    Brown
=====  =====  =====  ======
A star marks the expected sample for each class; its size reflects the
probability of selecting that class label.
The left and right examples highlight the ``n_labels`` parameter:
more of the samples in the right plot have 2 or 3 labels.
Note that this two-dimensional example is very degenerate:
generally the number of features would be much greater than the
"document length", while here we have much larger documents than vocabulary.
Similarly, with ``n_classes > n_features``, it is much less likely that a
feature distinguishes a particular class.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification as make_ml_clf
print(__doc__)
COLORS = np.array(['!',
'#FF3333', # red
'#0198E1', # blue
'#BF5FFF', # purple
'#FCD116', # yellow
'#FF7216', # orange
'#4DBD33', # green
'#87421F' # brown
])
# Use same random seed for multiple calls to make_multilabel_classification to
# ensure same distributions
RANDOM_SEED = np.random.randint(2 ** 10)
def plot_2d(ax, n_labels=1, n_classes=3, length=50):
X, Y, p_c, p_w_c = make_ml_clf(n_samples=150, n_features=2,
n_classes=n_classes, n_labels=n_labels,
length=length, allow_unlabeled=False,
return_distributions=True,
random_state=RANDOM_SEED)
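# Each row of Y is a 0/1 label vector; dotting it with [1, 2, 4] encodes the
# label combination as a bitmask that indexes COLORS (e.g. [1, 0, 1] -> 5, orange).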
ax.scatter(X[:, 0], X[:, 1], color=COLORS.take((Y * [1, 2, 4]
).sum(axis=1)),
marker='.')
ax.scatter(p_w_c[0] * length, p_w_c[1] * length,
marker='*', linewidth=.5, edgecolor='black',
s=20 + 1500 * p_c ** 2,
color=COLORS.take([1, 2, 4]))
ax.set_xlabel('Feature 0 count')
return p_c, p_w_c
_, (ax1, ax2) = plt.subplots(1, 2, sharex='row', sharey='row', figsize=(8, 4))
plt.subplots_adjust(bottom=.15)
p_c, p_w_c = plot_2d(ax1, n_labels=1)
ax1.set_title('n_labels=1, length=50')
ax1.set_ylabel('Feature 1 count')
plot_2d(ax2, n_labels=3)
ax2.set_title('n_labels=3, length=50')
ax2.set_xlim(left=0, auto=True)
ax2.set_ylim(bottom=0, auto=True)
plt.show()
print('The data was generated from (random_state=%d):' % RANDOM_SEED)
print('Class', 'P(C)', 'P(w0|C)', 'P(w1|C)', sep='\t')
for k, p, p_w in zip(['red', 'blue', 'yellow'], p_c, p_w_c.T):
print('%s\t%0.2f\t%0.2f\t%0.2f' % (k, p, p_w[0], p_w[1]))
| bsd-3-clause |
uclaros/QGIS | tests/src/python/test_qgsdatumtransforms.py | 10 | 18935 |
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsDatumTransforms.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '2019-05-25'
__copyright__ = 'Copyright 2019, The QGIS Project'
from qgis.core import (
QgsProjUtils,
QgsCoordinateReferenceSystem,
QgsDatumTransform
)
from qgis.testing import (start_app,
unittest,
)
from utilities import unitTestDataPath
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestPyQgsDatumTransform(unittest.TestCase):
@unittest.skipIf(QgsProjUtils.projVersionMajor() < 6, 'Not a proj6 build')
def testOperations(self):
ops = QgsDatumTransform.operations(QgsCoordinateReferenceSystem(),
QgsCoordinateReferenceSystem())
self.assertEqual(ops, [])
ops = QgsDatumTransform.operations(QgsCoordinateReferenceSystem('EPSG:3111'),
QgsCoordinateReferenceSystem())
self.assertEqual(ops, [])
ops = QgsDatumTransform.operations(QgsCoordinateReferenceSystem(),
QgsCoordinateReferenceSystem('EPSG:3111'))
self.assertEqual(ops, [])
ops = QgsDatumTransform.operations(QgsCoordinateReferenceSystem('EPSG:3111'),
QgsCoordinateReferenceSystem('EPSG:3111'))
self.assertEqual(len(ops), 1)
self.assertTrue(ops[0].name)
self.assertEqual(ops[0].proj, '+proj=noop')
self.assertEqual(ops[0].accuracy, 0.0)
self.assertTrue(ops[0].isAvailable)
ops = QgsDatumTransform.operations(QgsCoordinateReferenceSystem('EPSG:3111'),
QgsCoordinateReferenceSystem('EPSG:4283'))
self.assertEqual(len(ops), 1)
self.assertTrue(ops[0].name)
self.assertEqual(ops[0].proj, '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=unitconvert +xy_in=rad +xy_out=deg')
self.assertEqual(ops[0].accuracy, -1.0)
self.assertTrue(ops[0].isAvailable)
ops = QgsDatumTransform.operations(QgsCoordinateReferenceSystem('EPSG:3111'),
QgsCoordinateReferenceSystem('EPSG:28355'))
self.assertEqual(len(ops), 1)
self.assertTrue(ops[0].name)
self.assertEqual(ops[0].proj, '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=utm +zone=55 +south +ellps=GRS80')
self.assertEqual(ops[0].accuracy, 0.0)
self.assertTrue(ops[0].isAvailable)
# uses a grid file
ops = QgsDatumTransform.operations(QgsCoordinateReferenceSystem('EPSG:4283'),
QgsCoordinateReferenceSystem('EPSG:7844'))
self.assertGreaterEqual(len(ops), 5)
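# The ordering of candidate operations is not guaranteed, so each expected
# operation is located by its exact proj pipeline string before asserting on it.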
op1_index = [i for i in range(len(ops)) if ops[i].proj == '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=push +v_3 +step +proj=cart +ellps=GRS80 +step +proj=helmert +x=0.06155 +y=-0.01087 +z=-0.04019 +rx=-0.0394924 +ry=-0.0327221 +rz=-0.0328979 +s=-0.009994 +convention=coordinate_frame +step +inv +proj=cart +ellps=GRS80 +step +proj=pop +v_3 +step +proj=unitconvert +xy_in=rad +xy_out=deg'][0]
self.assertTrue(ops[op1_index].name)
self.assertEqual(ops[op1_index].proj, '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=push +v_3 +step +proj=cart +ellps=GRS80 +step +proj=helmert +x=0.06155 +y=-0.01087 +z=-0.04019 +rx=-0.0394924 +ry=-0.0327221 +rz=-0.0328979 +s=-0.009994 +convention=coordinate_frame +step +inv +proj=cart +ellps=GRS80 +step +proj=pop +v_3 +step +proj=unitconvert +xy_in=rad +xy_out=deg')
self.assertTrue(ops[op1_index].isAvailable)
self.assertEqual(ops[op1_index].accuracy, 0.01)
self.assertEqual(len(ops[op1_index].grids), 0)
if QgsProjUtils.projVersionMajor() == 6:
op2_index = [i for i in range(len(ops)) if ops[i].proj == '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=GDA94_GDA2020_conformal_and_distortion.gsb +step +proj=unitconvert +xy_in=rad +xy_out=deg'][0]
else:
op2_index = [i for i in range(len(ops)) if ops[
i].proj == '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal_and_distortion.tif +step +proj=unitconvert +xy_in=rad +xy_out=deg'][
0]
self.assertTrue(ops[op2_index].name)
if QgsProjUtils.projVersionMajor() == 6:
self.assertEqual(ops[op2_index].proj, '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=GDA94_GDA2020_conformal_and_distortion.gsb +step +proj=unitconvert +xy_in=rad +xy_out=deg')
else:
self.assertEqual(ops[op2_index].proj,
'+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal_and_distortion.tif +step +proj=unitconvert +xy_in=rad +xy_out=deg')
self.assertEqual(ops[op2_index].accuracy, 0.05)
self.assertEqual(len(ops[op2_index].grids), 1)
if QgsProjUtils.projVersionMajor() == 6:
self.assertEqual(ops[op2_index].grids[0].shortName, 'GDA94_GDA2020_conformal_and_distortion.gsb')
else:
self.assertEqual(ops[op2_index].grids[0].shortName, 'au_icsm_GDA94_GDA2020_conformal_and_distortion.tif')
self.assertEqual(ops[op2_index].grids[0].fullName, '')
if QgsProjUtils.projVersionMajor() == 6:
self.assertTrue(ops[op2_index].grids[0].packageName)
self.assertIn('http', ops[op2_index].grids[0].url)
self.assertTrue(ops[op2_index].grids[0].directDownload)
self.assertTrue(ops[op2_index].grids[0].openLicense)
if QgsProjUtils.projVersionMajor() == 6:
op3_index = [i for i in range(len(ops)) if ops[i].proj == '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=GDA94_GDA2020_conformal.gsb +step +proj=unitconvert +xy_in=rad +xy_out=deg'][0]
else:
op3_index = [i for i in range(len(ops)) if ops[
i].proj == '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal.tif +step +proj=unitconvert +xy_in=rad +xy_out=deg'][
0]
self.assertTrue(ops[op3_index].name)
if QgsProjUtils.projVersionMajor() == 6:
self.assertEqual(ops[op3_index].proj, '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=GDA94_GDA2020_conformal.gsb +step +proj=unitconvert +xy_in=rad +xy_out=deg')
else:
self.assertEqual(ops[op3_index].proj,
'+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal.tif +step +proj=unitconvert +xy_in=rad +xy_out=deg')
self.assertEqual(ops[op3_index].accuracy, 0.05)
self.assertEqual(len(ops[op3_index].grids), 1)
if QgsProjUtils.projVersionMajor() == 6:
self.assertEqual(ops[op3_index].grids[0].shortName, 'GDA94_GDA2020_conformal.gsb')
else:
self.assertEqual(ops[op3_index].grids[0].shortName, 'au_icsm_GDA94_GDA2020_conformal.tif')
self.assertEqual(ops[op3_index].grids[0].fullName, '')
if QgsProjUtils.projVersionMajor() == 6:
self.assertTrue(ops[op3_index].grids[0].packageName)
self.assertIn('http', ops[op3_index].grids[0].url)
self.assertTrue(ops[op3_index].grids[0].directDownload)
self.assertTrue(ops[op3_index].grids[0].openLicense)
if QgsProjUtils.projVersionMajor() == 6:
op4_index = [i for i in range(len(ops)) if ops[i].proj == '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=GDA94_GDA2020_conformal_cocos_island.gsb +step +proj=unitconvert +xy_in=rad +xy_out=deg'][0]
else:
op4_index = [i for i in range(len(ops)) if ops[
i].proj == '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal_cocos_island.tif +step +proj=unitconvert +xy_in=rad +xy_out=deg'][
0]
self.assertTrue(ops[op4_index].name)
if QgsProjUtils.projVersionMajor() == 6:
self.assertEqual(ops[op4_index].proj, '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=GDA94_GDA2020_conformal_cocos_island.gsb +step +proj=unitconvert +xy_in=rad +xy_out=deg')
else:
self.assertEqual(ops[op4_index].proj,
'+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal_cocos_island.tif +step +proj=unitconvert +xy_in=rad +xy_out=deg')
self.assertEqual(ops[op4_index].accuracy, 0.05)
self.assertEqual(len(ops[op4_index].grids), 1)
if QgsProjUtils.projVersionMajor() == 6:
self.assertEqual(ops[op4_index].grids[0].shortName, 'GDA94_GDA2020_conformal_cocos_island.gsb')
else:
self.assertEqual(ops[op4_index].grids[0].shortName, 'au_icsm_GDA94_GDA2020_conformal_cocos_island.tif')
self.assertEqual(ops[op4_index].grids[0].fullName, '')
if QgsProjUtils.projVersionMajor() == 6:
self.assertTrue(ops[op4_index].grids[0].packageName)
self.assertIn('http', ops[op4_index].grids[0].url)
if QgsProjUtils.projVersionMajor() == 6:
op5_index = [i for i in range(len(ops)) if ops[i].proj == '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=GDA94_GDA2020_conformal_christmas_island.gsb +step +proj=unitconvert +xy_in=rad +xy_out=deg'][0]
else:
op5_index = [i for i in range(len(ops)) if ops[
i].proj == '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal_christmas_island.tif +step +proj=unitconvert +xy_in=rad +xy_out=deg'][
0]
self.assertTrue(ops[op5_index].name)
if QgsProjUtils.projVersionMajor() == 6:
self.assertEqual(ops[op5_index].proj, '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=GDA94_GDA2020_conformal_christmas_island.gsb +step +proj=unitconvert +xy_in=rad +xy_out=deg')
else:
self.assertEqual(ops[op5_index].proj,
'+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal_christmas_island.tif +step +proj=unitconvert +xy_in=rad +xy_out=deg')
self.assertEqual(ops[op5_index].accuracy, 0.05)
self.assertEqual(len(ops[op5_index].grids), 1)
if QgsProjUtils.projVersionMajor() == 6:
self.assertEqual(ops[op5_index].grids[0].shortName, 'GDA94_GDA2020_conformal_christmas_island.gsb')
else:
self.assertEqual(ops[op5_index].grids[0].shortName, 'au_icsm_GDA94_GDA2020_conformal_christmas_island.tif')
self.assertEqual(ops[op5_index].grids[0].fullName, '')
if QgsProjUtils.projVersionMajor() == 6:
self.assertTrue(ops[op5_index].grids[0].packageName)
self.assertIn('http', ops[op5_index].grids[0].url)
# uses a pivot datum (technically a proj test, but this will help me sleep at night ;)
ops = QgsDatumTransform.operations(QgsCoordinateReferenceSystem('EPSG:3111'),
QgsCoordinateReferenceSystem('EPSG:7899'))
self.assertGreaterEqual(len(ops), 3)
op1_index = [i for i in range(len(ops)) if ops[i].proj == '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=push +v_3 +step +proj=cart +ellps=GRS80 +step +proj=helmert +x=0.06155 +y=-0.01087 +z=-0.04019 +rx=-0.0394924 +ry=-0.0327221 +rz=-0.0328979 +s=-0.009994 +convention=coordinate_frame +step +inv +proj=cart +ellps=GRS80 +step +proj=pop +v_3 +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80'][0]
self.assertTrue(ops[op1_index].name)
self.assertEqual(ops[op1_index].proj, '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=push +v_3 +step +proj=cart +ellps=GRS80 +step +proj=helmert +x=0.06155 +y=-0.01087 +z=-0.04019 +rx=-0.0394924 +ry=-0.0327221 +rz=-0.0328979 +s=-0.009994 +convention=coordinate_frame +step +inv +proj=cart +ellps=GRS80 +step +proj=pop +v_3 +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80')
self.assertTrue(ops[op1_index].isAvailable)
self.assertEqual(ops[op1_index].accuracy, 0.01)
self.assertEqual(len(ops[op1_index].grids), 0)
if QgsProjUtils.projVersionMajor() == 6:
op2_index = [i for i in range(len(ops)) if ops[i].proj == '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=hgridshift +grids=GDA94_GDA2020_conformal_and_distortion.gsb +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80'][0]
else:
op2_index = [i for i in range(len(ops)) if ops[
i].proj == '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal_and_distortion.tif +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80'][
0]
self.assertTrue(ops[op2_index].name)
if QgsProjUtils.projVersionMajor() == 6:
self.assertEqual(ops[op2_index].proj, '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=hgridshift +grids=GDA94_GDA2020_conformal_and_distortion.gsb +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80')
else:
self.assertEqual(ops[op2_index].proj,
'+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal_and_distortion.tif +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80')
self.assertEqual(ops[op2_index].accuracy, 0.05)
self.assertEqual(len(ops[op2_index].grids), 1)
if QgsProjUtils.projVersionMajor() == 6:
self.assertEqual(ops[op2_index].grids[0].shortName, 'GDA94_GDA2020_conformal_and_distortion.gsb')
else:
self.assertEqual(ops[op2_index].grids[0].shortName, 'au_icsm_GDA94_GDA2020_conformal_and_distortion.tif')
self.assertEqual(ops[op2_index].grids[0].fullName, '')
if QgsProjUtils.projVersionMajor() == 6:
self.assertTrue(ops[op2_index].grids[0].packageName)
self.assertIn('http', ops[op2_index].grids[0].url)
self.assertTrue(ops[op2_index].grids[0].directDownload)
self.assertTrue(ops[op2_index].grids[0].openLicense)
if QgsProjUtils.projVersionMajor() == 6:
op3_index = [i for i in range(len(ops)) if ops[i].proj == '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=hgridshift +grids=GDA94_GDA2020_conformal.gsb +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80'][0]
else:
op3_index = [i for i in range(len(ops)) if ops[
i].proj == '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal.tif +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80'][
0]
self.assertTrue(ops[op3_index].name)
if QgsProjUtils.projVersionMajor() == 6:
self.assertEqual(ops[op3_index].proj, '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=hgridshift +grids=GDA94_GDA2020_conformal.gsb +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80')
else:
self.assertEqual(ops[op3_index].proj,
'+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal.tif +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80')
self.assertEqual(ops[op3_index].accuracy, 0.05)
self.assertEqual(len(ops[op3_index].grids), 1)
if QgsProjUtils.projVersionMajor() == 6:
self.assertEqual(ops[op3_index].grids[0].shortName, 'GDA94_GDA2020_conformal.gsb')
else:
self.assertEqual(ops[op3_index].grids[0].shortName, 'au_icsm_GDA94_GDA2020_conformal.tif')
self.assertEqual(ops[op3_index].grids[0].fullName, '')
if QgsProjUtils.projVersionMajor() == 6:
self.assertTrue(ops[op3_index].grids[0].packageName)
self.assertIn('http', ops[op3_index].grids[0].url)
self.assertTrue(ops[op3_index].grids[0].directDownload)
self.assertTrue(ops[op3_index].grids[0].openLicense)
@unittest.skipIf(QgsProjUtils.projVersionMajor() < 7, 'Not a proj >= 7 build')
def testNoLasLos(self):
"""
Test that operations which rely on an NADCON5 grid shift file (unsupported by Proj at the time of writing!) are not returned
"""
ops = QgsDatumTransform.operations(QgsCoordinateReferenceSystem('EPSG:4138'),
QgsCoordinateReferenceSystem('EPSG:4269'))
self.assertEqual(len(ops), 2)
self.assertTrue(ops[0].name)
self.assertTrue(ops[0].proj)
self.assertTrue(ops[1].name)
self.assertTrue(ops[1].proj)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
juanantoniofm/accesible-moodle | fabtools/openvz/container.py | 16 | 2193 | """
OpenVZ containers
=================
"""
from fabtools import openvz as vz
class Container(object):
"""
Object-oriented interface to OpenVZ containers.
"""
def __init__(self, ctid):
self.ctid = ctid
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
return None
def create(self, **kwargs):
"""
Create the container.
Extra args are passed to :py:func:`fabtools.openvz.create`.
"""
return vz.create(self.ctid, **kwargs)
def destroy(self):
"""
Destroy the container.
"""
return vz.destroy(self.ctid)
def set(self, **kwargs):
"""
Set container parameters.
Extra args are passed to :py:func:`fabtools.openvz.set`.
"""
return vz.set(self.ctid, **kwargs)
def start(self, **kwargs):
"""
Start the container.
Extra args are passed to :py:func:`fabtools.openvz.start`.
"""
return vz.start(self.ctid, **kwargs)
def stop(self, **kwargs):
"""
Stop the container.
Extra args are passed to :py:func:`fabtools.openvz.stop`.
"""
return vz.stop(self.ctid, **kwargs)
def restart(self, **kwargs):
"""
Restart the container.
Extra args are passed to :py:func:`fabtools.openvz.restart`.
"""
return vz.restart(self.ctid, **kwargs)
def status(self):
"""
Get the container's status.
"""
return vz.status(self.ctid)
def running(self):
"""
Check if the container is running.
"""
return vz.running(self.ctid)
def exists(self):
"""
Check if the container exists.
"""
return vz.exists(self.ctid)
def exec2(self, command):
"""
Run a command inside the container.
::
from fabtools.require.openvz import container
with container('foo') as ct:
res = ct.exec2('hostname')
.. warning:: the command will be run as **root**.
"""
return vz.exec2(self.ctid, command)
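# A hedged usage sketch (assumes a configured Fabric host; the CTID and the
# create() keyword argument are illustrative, not guaranteed by this module):
#
#   ct = Container(101)
#   if not ct.exists():
#       ct.create(ostemplate='debian')
#   if not ct.running():
#       ct.start()
#   print(ct.exec2('uname -a'))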
| gpl-2.0 |
krishnazure/Flask | Work/Trivia - Module 5/env/Lib/site-packages/jinja2/optimizer.py | 1401 | 2302 | # -*- coding: utf-8 -*-
"""
jinja2.optimizer
~~~~~~~~~~~~~~~~
The jinja optimizer currently tries to constant-fold a few expressions
and modify the AST in place so that it is easier to evaluate. Because the
AST does not contain all the scoping information and the compiler has to
find that out, we cannot do all the optimizations we would like. For
example, loop unrolling doesn't work because unrolled loops would have
different scoping.
The solution would be a second syntax tree that has the scoping rules stored.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from jinja2 import nodes
from jinja2.visitor import NodeTransformer
def optimize(node, environment):
"""The context hint can be used to perform an static optimization
based on the context given."""
optimizer = Optimizer(environment)
return optimizer.visit(node)
class Optimizer(NodeTransformer):
def __init__(self, environment):
self.environment = environment
def visit_If(self, node):
"""Eliminate dead code."""
# do not optimize ifs that have a block inside so that it doesn't
# break super().
if node.find(nodes.Block) is not None:
return self.generic_visit(node)
try:
val = self.visit(node.test).as_const()
except nodes.Impossible:
return self.generic_visit(node)
if val:
body = node.body
else:
body = node.else_
result = []
for node in body:
result.extend(self.visit_list(node))
return result
def fold(self, node):
"""Do constant folding."""
node = self.generic_visit(node)
try:
return nodes.Const.from_untrusted(node.as_const(),
lineno=node.lineno,
environment=self.environment)
except nodes.Impossible:
return node
visit_Add = visit_Sub = visit_Mul = visit_Div = visit_FloorDiv = \
visit_Pow = visit_Mod = visit_And = visit_Or = visit_Pos = visit_Neg = \
visit_Not = visit_Compare = visit_Getitem = visit_Getattr = visit_Call = \
visit_Filter = visit_Test = visit_CondExpr = fold
del fold
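# A hedged illustration (not part of this module) of what the optimizer does:
#
#   from jinja2 import Environment
#   env = Environment()
#   ast = env.parse('{{ 1 + 2 * 3 }}')
#   ast = optimize(ast, env)    # the Add/Mul nodes fold into Const(7)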
| apache-2.0 |
HesselTjeerdsma/Cyber-Physical-Pacman-Game | Algor/flask/lib/python2.7/site-packages/unidecode/x00b.py | 252 | 4132 | data = (
'[?]', # 0x00
'N', # 0x01
'N', # 0x02
'H', # 0x03
'[?]', # 0x04
'a', # 0x05
'aa', # 0x06
'i', # 0x07
'ii', # 0x08
'u', # 0x09
'uu', # 0x0a
'R', # 0x0b
'L', # 0x0c
'[?]', # 0x0d
'[?]', # 0x0e
'e', # 0x0f
'ai', # 0x10
'[?]', # 0x11
'[?]', # 0x12
'o', # 0x13
'au', # 0x14
'k', # 0x15
'kh', # 0x16
'g', # 0x17
'gh', # 0x18
'ng', # 0x19
'c', # 0x1a
'ch', # 0x1b
'j', # 0x1c
'jh', # 0x1d
'ny', # 0x1e
'tt', # 0x1f
'tth', # 0x20
'dd', # 0x21
'ddh', # 0x22
'nn', # 0x23
't', # 0x24
'th', # 0x25
'd', # 0x26
'dh', # 0x27
'n', # 0x28
'[?]', # 0x29
'p', # 0x2a
'ph', # 0x2b
'b', # 0x2c
'bh', # 0x2d
'm', # 0x2e
'y', # 0x2f
'r', # 0x30
'[?]', # 0x31
'l', # 0x32
'll', # 0x33
'[?]', # 0x34
'', # 0x35
'sh', # 0x36
'ss', # 0x37
's', # 0x38
'h', # 0x39
'[?]', # 0x3a
'[?]', # 0x3b
'\'', # 0x3c
'\'', # 0x3d
'aa', # 0x3e
'i', # 0x3f
'ii', # 0x40
'u', # 0x41
'uu', # 0x42
'R', # 0x43
'[?]', # 0x44
'[?]', # 0x45
'[?]', # 0x46
'e', # 0x47
'ai', # 0x48
'[?]', # 0x49
'[?]', # 0x4a
'o', # 0x4b
'au', # 0x4c
'', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'[?]', # 0x55
'+', # 0x56
'+', # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'rr', # 0x5c
'rh', # 0x5d
'[?]', # 0x5e
'yy', # 0x5f
'RR', # 0x60
'LL', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'0', # 0x66
'1', # 0x67
'2', # 0x68
'3', # 0x69
'4', # 0x6a
'5', # 0x6b
'6', # 0x6c
'7', # 0x6d
'8', # 0x6e
'9', # 0x6f
'', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'[?]', # 0x81
'N', # 0x82
'H', # 0x83
'[?]', # 0x84
'a', # 0x85
'aa', # 0x86
'i', # 0x87
'ii', # 0x88
'u', # 0x89
'uu', # 0x8a
'[?]', # 0x8b
'[?]', # 0x8c
'[?]', # 0x8d
'e', # 0x8e
'ee', # 0x8f
'ai', # 0x90
'[?]', # 0x91
'o', # 0x92
'oo', # 0x93
'au', # 0x94
'k', # 0x95
'[?]', # 0x96
'[?]', # 0x97
'[?]', # 0x98
'ng', # 0x99
'c', # 0x9a
'[?]', # 0x9b
'j', # 0x9c
'[?]', # 0x9d
'ny', # 0x9e
'tt', # 0x9f
'[?]', # 0xa0
'[?]', # 0xa1
'[?]', # 0xa2
'nn', # 0xa3
't', # 0xa4
'[?]', # 0xa5
'[?]', # 0xa6
'[?]', # 0xa7
'n', # 0xa8
'nnn', # 0xa9
'p', # 0xaa
'[?]', # 0xab
'[?]', # 0xac
'[?]', # 0xad
'm', # 0xae
'y', # 0xaf
'r', # 0xb0
'rr', # 0xb1
'l', # 0xb2
'll', # 0xb3
'lll', # 0xb4
'v', # 0xb5
'[?]', # 0xb6
'ss', # 0xb7
's', # 0xb8
'h', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'[?]', # 0xbc
'[?]', # 0xbd
'aa', # 0xbe
'i', # 0xbf
'ii', # 0xc0
'u', # 0xc1
'uu', # 0xc2
'[?]', # 0xc3
'[?]', # 0xc4
'[?]', # 0xc5
'e', # 0xc6
'ee', # 0xc7
'ai', # 0xc8
'[?]', # 0xc9
'o', # 0xca
'oo', # 0xcb
'au', # 0xcc
'', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'+', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'0', # 0xe6
'1', # 0xe7
'2', # 0xe8
'3', # 0xe9
'4', # 0xea
'5', # 0xeb
'6', # 0xec
'7', # 0xed
'8', # 0xee
'9', # 0xef
'+10+', # 0xf0
'+100+', # 0xf1
'+1000+', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| apache-2.0 |
Azure/azure-sdk-for-python | sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/operations/_tag_operations.py | 1 | 86989 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class TagOperations(object):
"""TagOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.apimanagement.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_operation(
self,
resource_group_name, # type: str
service_name, # type: str
api_id, # type: str
operation_id, # type: str
filter=None, # type: Optional[str]
top=None, # type: Optional[int]
skip=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.TagCollection"]
"""Lists all Tags associated with the Operation.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param api_id: API revision identifier. Must be unique in the current API Management service
instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:type api_id: str
:param operation_id: Operation identifier within an API. Must be unique in the current API
Management service instance.
:type operation_id: str
:param filter: Filter expression. Supported fields (both with usage
"filter"): displayName and name. Supported operators: ge, le, eq, ne,
gt, lt. Supported functions: substringof, contains, startswith, endswith.
:type filter: str
:param top: Number of records to return.
:type top: int
:param skip: Number of records to skip.
:type skip: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either TagCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.apimanagement.models.TagCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_operation.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'operationId': self._serialize.url("operation_id", operation_id, 'str', max_length=80, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int', minimum=1)
if skip is not None:
query_parameters['$skip'] = self._serialize.query("skip", skip, 'int', minimum=0)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('TagCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_operation.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/operations/{operationId}/tags'} # type: ignore
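# A hedged usage sketch (client construction is elided elsewhere; the resource
# names and the `tag` attribute path are illustrative assumptions):
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.apimanagement import ApiManagementClient
#   client = ApiManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   for tag in client.tag.list_by_operation(
#           "my-rg", "my-apim", "echo-api", "create-resource"):
#       print(tag.name)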
def get_entity_state_by_operation(
self,
resource_group_name, # type: str
service_name, # type: str
api_id, # type: str
operation_id, # type: str
tag_id, # type: str
**kwargs # type: Any
):
# type: (...) -> bool
"""Gets the entity state version of the tag specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param api_id: API revision identifier. Must be unique in the current API Management service
instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:type api_id: str
:param operation_id: Operation identifier within an API. Must be unique in the current API
Management service instance.
:type operation_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get_entity_state_by_operation.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'operationId': self._serialize.url("operation_id", operation_id, 'str', max_length=80, min_length=1),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
if cls:
return cls(pipeline_response, None, response_headers)
return 200 <= response.status_code <= 299
get_entity_state_by_operation.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/operations/{operationId}/tags/{tagId}'} # type: ignore
def get_by_operation(
self,
resource_group_name, # type: str
service_name, # type: str
api_id, # type: str
operation_id, # type: str
tag_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.TagContract"
"""Get tag associated with the Operation.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param api_id: API revision identifier. Must be unique in the current API Management service
instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:type api_id: str
:param operation_id: Operation identifier within an API. Must be unique in the current API
Management service instance.
:type operation_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.TagContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get_by_operation.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'operationId': self._serialize.url("operation_id", operation_id, 'str', max_length=80, min_length=1),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('TagContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_by_operation.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/operations/{operationId}/tags/{tagId}'} # type: ignore
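# Usage sketch (hypothetical names; assumes an authenticated ApiManagementClient
# whose tag operations group is exposed as `client.tag` — not part of this module):
#
#   tag = client.tag.get_by_operation(
#       resource_group_name="my-rg",
#       service_name="my-apim",
#       api_id="echo-api",
#       operation_id="retrieve-resource",
#       tag_id="priority",
#   )
#   print(tag.display_name)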
def assign_to_operation(
self,
resource_group_name, # type: str
service_name, # type: str
api_id, # type: str
operation_id, # type: str
tag_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.TagContract"
"""Assign tag to the Operation.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param api_id: API revision identifier. Must be unique in the current API Management service
instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:type api_id: str
:param operation_id: Operation identifier within an API. Must be unique in the current API
Management service instance.
:type operation_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.TagContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.assign_to_operation.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'operationId': self._serialize.url("operation_id", operation_id, 'str', max_length=80, min_length=1),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('TagContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
assign_to_operation.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/operations/{operationId}/tags/{tagId}'} # type: ignore
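# Usage sketch for the PUT-based assignment (hypothetical identifiers; the tag
# is expected to exist at the service level before being assigned — a sketch,
# not part of this module):
#
#   tag = client.tag.assign_to_operation(
#       "my-rg", "my-apim", "echo-api", "retrieve-resource", "priority",
#   )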
def detach_from_operation(
self,
resource_group_name, # type: str
service_name, # type: str
api_id, # type: str
operation_id, # type: str
tag_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Detach the tag from the Operation.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param api_id: API revision identifier. Must be unique in the current API Management service
instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:type api_id: str
:param operation_id: Operation identifier within an API. Must be unique in the current API
Management service instance.
:type operation_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.detach_from_operation.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'operationId': self._serialize.url("operation_id", operation_id, 'str', max_length=80, min_length=1),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
detach_from_operation.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/operations/{operationId}/tags/{tagId}'} # type: ignore
def list_by_api(
self,
resource_group_name, # type: str
service_name, # type: str
api_id, # type: str
filter=None, # type: Optional[str]
top=None, # type: Optional[int]
skip=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.TagCollection"]
"""Lists all Tags associated with the API.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param api_id: API revision identifier. Must be unique in the current API Management service
instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:type api_id: str
:param filter: Filter expression applied to the tag collection. Supported fields:
displayName, name. Supported operators: ge, le, eq, ne, gt, lt. Supported functions:
substringof, contains, startswith, endswith.
:type filter: str
:param top: Number of records to return.
:type top: int
:param skip: Number of records to skip.
:type skip: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either TagCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.apimanagement.models.TagCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_api.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int', minimum=1)
if skip is not None:
query_parameters['$skip'] = self._serialize.query("skip", skip, 'int', minimum=0)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('TagCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_api.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/tags'} # type: ignore
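# Paging sketch: ItemPaged transparently follows nextLink, so the caller simply
# iterates (hypothetical filter value; see the :param filter: docstring above
# for the supported fields and functions):
#
#   for tag in client.tag.list_by_api(
#       "my-rg", "my-apim", "echo-api",
#       filter="startswith(name, 'env-')",
#       top=25,
#   ):
#       print(tag.name)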
def get_entity_state_by_api(
self,
resource_group_name, # type: str
service_name, # type: str
api_id, # type: str
tag_id, # type: str
**kwargs # type: Any
):
# type: (...) -> bool
"""Gets the entity state version of the tag specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param api_id: API revision identifier. Must be unique in the current API Management service
instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:type api_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[bool]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get_entity_state_by_api.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag'] = self._deserialize('str', response.headers.get('ETag'))
if cls:
return cls(pipeline_response, None, response_headers)
return 200 <= response.status_code <= 299
get_entity_state_by_api.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/tags/{tagId}'} # type: ignore
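# The boolean return value discards the ETag header; a `cls` callback can
# surface it instead. Sketch — the callback receives the pipeline response,
# the deserialized body (None for this HEAD call), and the response headers:
#
#   etag = client.tag.get_entity_state_by_api(
#       "my-rg", "my-apim", "echo-api", "priority",
#       cls=lambda pipeline_response, deserialized, headers: headers["ETag"],
#   )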
def get_by_api(
self,
resource_group_name, # type: str
service_name, # type: str
api_id, # type: str
tag_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.TagContract"
"""Get tag associated with the API.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param api_id: API revision identifier. Must be unique in the current API Management service
instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:type api_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.TagContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get_by_api.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag'] = self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('TagContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_by_api.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/tags/{tagId}'} # type: ignore
def assign_to_api(
self,
resource_group_name, # type: str
service_name, # type: str
api_id, # type: str
tag_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.TagContract"
"""Assign tag to the Api.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param api_id: API revision identifier. Must be unique in the current API Management service
instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:type api_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.TagContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.assign_to_api.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag'] = self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('TagContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
assign_to_api.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/tags/{tagId}'} # type: ignore
def detach_from_api(
self,
resource_group_name, # type: str
service_name, # type: str
api_id, # type: str
tag_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Detach the tag from the Api.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param api_id: API revision identifier. Must be unique in the current API Management service
instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:type api_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.detach_from_api.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
detach_from_api.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/tags/{tagId}'} # type: ignore
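# Usage sketch (hypothetical identifiers; detaching only removes the
# association — the tag itself remains defined on the service):
#
#   client.tag.detach_from_api("my-rg", "my-apim", "echo-api", "priority")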
def list_by_product(
self,
resource_group_name, # type: str
service_name, # type: str
product_id, # type: str
filter=None, # type: Optional[str]
top=None, # type: Optional[int]
skip=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.TagCollection"]
"""Lists all Tags associated with the Product.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param product_id: Product identifier. Must be unique in the current API Management service
instance.
:type product_id: str
:param filter: Filter expression applied to the tag collection. Supported fields:
displayName, name. Supported operators: ge, le, eq, ne, gt, lt. Supported functions:
substringof, contains, startswith, endswith.
:type filter: str
:param top: Number of records to return.
:type top: int
:param skip: Number of records to skip.
:type skip: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either TagCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.apimanagement.models.TagCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_product.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'productId': self._serialize.url("product_id", product_id, 'str', max_length=256, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int', minimum=1)
if skip is not None:
query_parameters['$skip'] = self._serialize.query("skip", skip, 'int', minimum=0)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('TagCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_product.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/products/{productId}/tags'} # type: ignore
def get_entity_state_by_product(
self,
resource_group_name, # type: str
service_name, # type: str
product_id, # type: str
tag_id, # type: str
**kwargs # type: Any
):
# type: (...) -> bool
"""Gets the entity state version of the tag specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param product_id: Product identifier. Must be unique in the current API Management service
instance.
:type product_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[bool]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get_entity_state_by_product.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'productId': self._serialize.url("product_id", product_id, 'str', max_length=256, min_length=1),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag'] = self._deserialize('str', response.headers.get('ETag'))
if cls:
return cls(pipeline_response, None, response_headers)
return 200 <= response.status_code <= 299
get_entity_state_by_product.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/products/{productId}/tags/{tagId}'} # type: ignore
def get_by_product(
self,
resource_group_name, # type: str
service_name, # type: str
product_id, # type: str
tag_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.TagContract"
"""Get tag associated with the Product.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param product_id: Product identifier. Must be unique in the current API Management service
instance.
:type product_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.TagContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get_by_product.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'productId': self._serialize.url("product_id", product_id, 'str', max_length=256, min_length=1),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag'] = self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('TagContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_by_product.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/products/{productId}/tags/{tagId}'} # type: ignore
def assign_to_product(
self,
resource_group_name, # type: str
service_name, # type: str
product_id, # type: str
tag_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.TagContract"
"""Assign tag to the Product.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param product_id: Product identifier. Must be unique in the current API Management service
instance.
:type product_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.TagContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.assign_to_product.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'productId': self._serialize.url("product_id", product_id, 'str', max_length=256, min_length=1),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('TagContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
assign_to_product.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/products/{productId}/tags/{tagId}'} # type: ignore
def detach_from_product(
self,
resource_group_name, # type: str
service_name, # type: str
product_id, # type: str
tag_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Detach the tag from the Product.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param product_id: Product identifier. Must be unique in the current API Management service
instance.
:type product_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.detach_from_product.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'productId': self._serialize.url("product_id", product_id, 'str', max_length=256, min_length=1),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
detach_from_product.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/products/{productId}/tags/{tagId}'} # type: ignore
def list_by_service(
self,
resource_group_name, # type: str
service_name, # type: str
filter=None, # type: Optional[str]
top=None, # type: Optional[int]
skip=None, # type: Optional[int]
scope=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.TagCollection"]
"""Lists a collection of tags defined within a service instance.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param filter: Filter expression applied to the tag collection. Supported fields:
name, displayName. Supported operators: ge, le, eq, ne, gt, lt. Supported functions:
substringof, contains, startswith, endswith.
:type filter: str
:param top: Number of records to return.
:type top: int
:param skip: Number of records to skip.
:type skip: int
:param scope: Scope like 'apis', 'products' or 'apis/{apiId}'.
:type scope: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either TagCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.apimanagement.models.TagCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_service.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int', minimum=1)
if skip is not None:
query_parameters['$skip'] = self._serialize.query("skip", skip, 'int', minimum=0)
if scope is not None:
query_parameters['scope'] = self._serialize.query("scope", scope, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('TagCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_service.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/tags'} # type: ignore
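# Usage sketch for scoped listing (hypothetical values; `scope` narrows the
# result to tags assigned within the given scope, e.g. a single API):
#
#   for tag in client.tag.list_by_service(
#       "my-rg", "my-apim", scope="apis/echo-api",
#   ):
#       print(tag.name, tag.display_name)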
def get_entity_state(
self,
resource_group_name, # type: str
service_name, # type: str
tag_id, # type: str
**kwargs # type: Any
):
# type: (...) -> bool
"""Gets the entity state version of the tag specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[bool]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get_entity_state.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag'] = self._deserialize('str', response.headers.get('ETag'))
if cls:
return cls(pipeline_response, None, response_headers)
return 200 <= response.status_code <= 299
get_entity_state.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/tags/{tagId}'} # type: ignore
def get(
self,
resource_group_name, # type: str
service_name, # type: str
tag_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.TagContract"
"""Gets the details of the tag specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.TagContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag'] = self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('TagContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/tags/{tagId}'} # type: ignore
def create_or_update(
self,
resource_group_name, # type: str
service_name, # type: str
tag_id, # type: str
parameters, # type: "_models.TagCreateUpdateParameters"
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.TagContract"
"""Creates a tag.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:param parameters: Create parameters.
:type parameters: ~azure.mgmt.apimanagement.models.TagCreateUpdateParameters
:param if_match: ETag of the Entity. Not required when creating an entity, but required when
updating an entity.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.TagContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagCreateUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag'] = self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('TagContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/tags/{tagId}'} # type: ignore
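# Usage sketch (hypothetical identifiers; TagCreateUpdateParameters carries the
# writable display_name property):
#
#   from azure.mgmt.apimanagement.models import TagCreateUpdateParameters
#
#   tag = client.tag.create_or_update(
#       "my-rg", "my-apim", "priority",
#       parameters=TagCreateUpdateParameters(display_name="Priority"),
#   )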
def update(
self,
resource_group_name, # type: str
service_name, # type: str
tag_id, # type: str
if_match, # type: str
parameters, # type: "_models.TagCreateUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.TagContract"
"""Updates the details of the tag specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:param if_match: ETag of the Entity. ETag should match the current entity state from the header
response of the GET request or it should be * for unconditional update.
:type if_match: str
:param parameters: Update parameters.
:type parameters: ~azure.mgmt.apimanagement.models.TagCreateUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.TagContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagCreateUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag'] = self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('TagContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/tags/{tagId}'} # type: ignore
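# Hedged usage sketch (illustrative, not generated code): `client` is assumed
# to be an ApiManagementClient, and `etag` to have been captured from the ETag
# header of a prior GET on the same tag.
#   updated = client.tag.update(
#       'my-rg', 'my-service', 'my-tag',
#       if_match=etag,  # pass '*' to update unconditionally
#       parameters=TagCreateUpdateParameters(display_name='Renamed tag'),
#   )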
def delete(
self,
resource_group_name, # type: str
service_name, # type: str
tag_id, # type: str
if_match, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes specific tag of the API Management service instance.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:param if_match: ETag of the entity. The ETag should match the current entity state from the
header response of the GET request, or it should be * for an unconditional delete.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/tags/{tagId}'} # type: ignore
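# Corresponding hedged sketch for delete, under the same assumptions as the
# update example above:
#   client.tag.delete('my-rg', 'my-service', 'my-tag', if_match='*')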
| mit |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/io/tests/parser/test_textreader.py | 7 | 12917 | # -*- coding: utf-8 -*-
"""
Tests the TextReader class in parsers.pyx, which
is integral to the C engine in parsers.py
"""
from pandas.compat import StringIO, BytesIO, map
from pandas import compat
import os
import sys
import nose
from numpy import nan
import numpy as np
from pandas import DataFrame
from pandas.io.parsers import (read_csv, TextFileReader)
from pandas.util.testing import assert_frame_equal
import pandas.util.testing as tm
from pandas.parser import TextReader
import pandas.parser as parser
class TestTextReader(tm.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def test_file_handle(self):
try:
f = open(self.csv1, 'rb')
reader = TextReader(f)
result = reader.read() # noqa
finally:
f.close()
def test_string_filename(self):
reader = TextReader(self.csv1, header=None)
reader.read()
def test_file_handle_mmap(self):
try:
f = open(self.csv1, 'rb')
reader = TextReader(f, memory_map=True, header=None)
reader.read()
finally:
f.close()
def test_StringIO(self):
with open(self.csv1, 'rb') as f:
text = f.read()
src = BytesIO(text)
reader = TextReader(src, header=None)
reader.read()
def test_string_factorize(self):
# should this be optional?
data = 'a\nb\na\nb\na'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
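# The C reader factorizes repeated strings: the two distinct values 'a'
# and 'b' should collapse to exactly two object identities across five rows.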
self.assertEqual(len(set(map(id, result[0]))), 2)
def test_skipinitialspace(self):
data = ('a, b\n'
'a, b\n'
'a, b\n'
'a, b')
reader = TextReader(StringIO(data), skipinitialspace=True,
header=None)
result = reader.read()
self.assert_numpy_array_equal(result[0],
np.array(['a', 'a', 'a', 'a'],
dtype=np.object_))
self.assert_numpy_array_equal(result[1],
np.array(['b', 'b', 'b', 'b'],
dtype=np.object_))
def test_parse_booleans(self):
data = 'True\nFalse\nTrue\nTrue'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
self.assertEqual(result[0].dtype, np.bool_)
def test_delimit_whitespace(self):
data = 'a b\na\t\t "b"\n"a"\t \t b'
reader = TextReader(StringIO(data), delim_whitespace=True,
header=None)
result = reader.read()
self.assert_numpy_array_equal(result[0], np.array(['a', 'a', 'a'],
dtype=np.object_))
self.assert_numpy_array_equal(result[1], np.array(['b', 'b', 'b'],
dtype=np.object_))
def test_embedded_newline(self):
data = 'a\n"hello\nthere"\nthis'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
expected = np.array(['a', 'hello\nthere', 'this'], dtype=np.object_)
self.assert_numpy_array_equal(result[0], expected)
def test_euro_decimal(self):
data = '12345,67\n345,678'
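# delimiter=':' never occurs in the data, so the commas are not split into
# fields and decimal=',' can reinterpret them as decimal points.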
reader = TextReader(StringIO(data), delimiter=':',
decimal=',', header=None)
result = reader.read()
expected = np.array([12345.67, 345.678])
tm.assert_almost_equal(result[0], expected)
def test_integer_thousands(self):
data = '123,456\n12,500'
reader = TextReader(StringIO(data), delimiter=':',
thousands=',', header=None)
result = reader.read()
expected = np.array([123456, 12500], dtype=np.int64)
tm.assert_almost_equal(result[0], expected)
def test_integer_thousands_alt(self):
data = '123.456\n12.500'
reader = TextFileReader(StringIO(data), delimiter=':',
thousands='.', header=None)
result = reader.read()
expected = DataFrame([123456, 12500])
tm.assert_frame_equal(result, expected)
def test_skip_bad_lines(self):
# too many lines, see #2430 for why
data = ('a:b:c\n'
'd:e:f\n'
'g:h:i\n'
'j:k:l:m\n'
'l:m:n\n'
'o:p:q:r')
reader = TextReader(StringIO(data), delimiter=':',
header=None)
self.assertRaises(parser.CParserError, reader.read)
reader = TextReader(StringIO(data), delimiter=':',
header=None,
error_bad_lines=False,
warn_bad_lines=False)
result = reader.read()
expected = {0: ['a', 'd', 'g', 'l'],
1: ['b', 'e', 'h', 'm'],
2: ['c', 'f', 'i', 'n']}
assert_array_dicts_equal(result, expected)
stderr = sys.stderr
sys.stderr = StringIO()
try:
reader = TextReader(StringIO(data), delimiter=':',
header=None,
error_bad_lines=False,
warn_bad_lines=True)
reader.read()
val = sys.stderr.getvalue()
self.assertTrue('Skipping line 4' in val)
self.assertTrue('Skipping line 6' in val)
finally:
sys.stderr = stderr
def test_header_not_enough_lines(self):
data = ('skip this\n'
'skip this\n'
'a,b,c\n'
'1,2,3\n'
'4,5,6')
reader = TextReader(StringIO(data), delimiter=',', header=2)
header = reader.header
expected = [['a', 'b', 'c']]
self.assertEqual(header, expected)
recs = reader.read()
expected = {0: [1, 4], 1: [2, 5], 2: [3, 6]}
assert_array_dicts_equal(expected, recs)
# not enough rows
self.assertRaises(parser.CParserError, TextReader, StringIO(data),
delimiter=',', header=5, as_recarray=True)
def test_header_not_enough_lines_as_recarray(self):
data = ('skip this\n'
'skip this\n'
'a,b,c\n'
'1,2,3\n'
'4,5,6')
reader = TextReader(StringIO(data), delimiter=',', header=2,
as_recarray=True)
header = reader.header
expected = [['a', 'b', 'c']]
self.assertEqual(header, expected)
recs = reader.read()
expected = {'a': [1, 4], 'b': [2, 5], 'c': [3, 6]}
assert_array_dicts_equal(expected, recs)
# not enough rows
self.assertRaises(parser.CParserError, TextReader, StringIO(data),
delimiter=',', header=5, as_recarray=True)
def test_escapechar(self):
data = ('\\"hello world\"\n'
'\\"hello world\"\n'
'\\"hello world\"')
reader = TextReader(StringIO(data), delimiter=',', header=None,
escapechar='\\')
result = reader.read()
expected = {0: ['"hello world"'] * 3}
assert_array_dicts_equal(result, expected)
def test_eof_has_eol(self):
# handling of new line at EOF
pass
def test_na_substitution(self):
pass
def test_numpy_string_dtype(self):
data = """\
a,1
aa,2
aaa,3
aaaa,4
aaaaa,5"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=',', header=None,
**kwds)
reader = _make_reader(dtype='S5,i4')
result = reader.read()
self.assertEqual(result[0].dtype, 'S5')
ex_values = np.array(['a', 'aa', 'aaa', 'aaaa', 'aaaaa'], dtype='S5')
self.assertTrue((result[0] == ex_values).all())
self.assertEqual(result[1].dtype, 'i4')
reader = _make_reader(dtype='S4')
result = reader.read()
self.assertEqual(result[0].dtype, 'S4')
ex_values = np.array(['a', 'aa', 'aaa', 'aaaa', 'aaaa'], dtype='S4')
self.assertTrue((result[0] == ex_values).all())
self.assertEqual(result[1].dtype, 'S4')
def test_numpy_string_dtype_as_recarray(self):
data = """\
a,1
aa,2
aaa,3
aaaa,4
aaaaa,5"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=',', header=None,
**kwds)
reader = _make_reader(dtype='S4', as_recarray=True)
result = reader.read()
self.assertEqual(result['0'].dtype, 'S4')
ex_values = np.array(['a', 'aa', 'aaa', 'aaaa', 'aaaa'], dtype='S4')
self.assertTrue((result['0'] == ex_values).all())
self.assertEqual(result['1'].dtype, 'S4')
def test_pass_dtype(self):
data = """\
one,two
1,a
2,b
3,c
4,d"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=',', **kwds)
reader = _make_reader(dtype={'one': 'u1', 1: 'S1'})
result = reader.read()
self.assertEqual(result[0].dtype, 'u1')
self.assertEqual(result[1].dtype, 'S1')
reader = _make_reader(dtype={'one': np.uint8, 1: object})
result = reader.read()
self.assertEqual(result[0].dtype, 'u1')
self.assertEqual(result[1].dtype, 'O')
reader = _make_reader(dtype={'one': np.dtype('u1'),
1: np.dtype('O')})
result = reader.read()
self.assertEqual(result[0].dtype, 'u1')
self.assertEqual(result[1].dtype, 'O')
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=',', **kwds)
reader = _make_reader(usecols=(1, 2))
result = reader.read()
exp = _make_reader().read()
self.assertEqual(len(result), 2)
self.assertTrue((result[1] == exp[1]).all())
self.assertTrue((result[2] == exp[2]).all())
def test_cr_delimited(self):
def _test(text, **kwargs):
nice_text = text.replace('\r', '\r\n')
result = TextReader(StringIO(text), **kwargs).read()
expected = TextReader(StringIO(nice_text), **kwargs).read()
assert_array_dicts_equal(result, expected)
data = 'a,b,c\r1,2,3\r4,5,6\r7,8,9\r10,11,12'
_test(data, delimiter=',')
data = 'a b c\r1 2 3\r4 5 6\r7 8 9\r10 11 12'
_test(data, delim_whitespace=True)
data = 'a,b,c\r1,2,3\r4,5,6\r,88,9\r10,11,12'
_test(data, delimiter=',')
sample = ('A,B,C,D,E,F,G,H,I,J,K,L,M,N,O\r'
'AAAAA,BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0\r'
',BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0')
_test(sample, delimiter=',')
data = 'A B C\r 2 3\r4 5 6'
_test(data, delim_whitespace=True)
data = 'A B C\r2 3\r4 5 6'
_test(data, delim_whitespace=True)
def test_empty_field_eof(self):
data = 'a,b,c\n1,2,3\n4,,'
result = TextReader(StringIO(data), delimiter=',').read()
expected = {0: np.array([1, 4]),
1: np.array(['2', ''], dtype=object),
2: np.array(['3', ''], dtype=object)}
assert_array_dicts_equal(result, expected)
# GH5664
a = DataFrame([['b'], [nan]], columns=['a'], index=['a', 'c'])
b = DataFrame([[1, 1, 1, 0], [1, 1, 1, 0]],
columns=list('abcd'),
index=[1, 1])
c = DataFrame([[1, 2, 3, 4], [6, nan, nan, nan],
[8, 9, 10, 11], [13, 14, nan, nan]],
columns=list('abcd'),
index=[0, 5, 7, 12])
for _ in range(100):
df = read_csv(StringIO('a,b\nc\n'), skiprows=0,
names=['a'], engine='c')
assert_frame_equal(df, a)
df = read_csv(StringIO('1,1,1,1,0\n' * 2 + '\n' * 2),
names=list("abcd"), engine='c')
assert_frame_equal(df, b)
df = read_csv(StringIO('0,1,2,3,4\n5,6\n7,8,9,10,11\n12,13,14'),
names=list('abcd'), engine='c')
assert_frame_equal(df, c)
def assert_array_dicts_equal(left, right):
for k, v in compat.iteritems(left):
assert(np.array_equal(v, right[k]))
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
jmmease/pandas | asv_bench/benchmarks/reindex.py | 7 | 6523 | from .pandas_vb_common import *
from random import shuffle
class Reindexing(object):
goal_time = 0.2
def setup(self):
self.rng = DatetimeIndex(start='1/1/1970', periods=10000, freq='1min')
self.df = DataFrame(np.random.rand(10000, 10), index=self.rng,
columns=range(10))
self.df['foo'] = 'bar'
self.rng2 = Index(self.rng[::2])
self.df2 = DataFrame(index=range(10000),
data=np.random.rand(10000, 30), columns=range(30))
# multi-index
N = 5000
K = 200
level1 = tm.makeStringIndex(N).values.repeat(K)
level2 = np.tile(tm.makeStringIndex(K).values, N)
index = MultiIndex.from_arrays([level1, level2])
self.s1 = Series(np.random.randn((N * K)), index=index)
self.s2 = self.s1[::2]
def time_reindex_dates(self):
self.df.reindex(self.rng2)
def time_reindex_columns(self):
self.df2.reindex(columns=self.df.columns[1:5])
def time_reindex_multiindex(self):
self.s1.reindex(self.s2.index)
#----------------------------------------------------------------------
# Pad / backfill
class FillMethod(object):
goal_time = 0.2
def setup(self):
self.rng = date_range('1/1/2000', periods=100000, freq='1min')
self.ts = Series(np.random.randn(len(self.rng)), index=self.rng)
self.ts2 = self.ts[::2]
self.ts3 = self.ts2.reindex(self.ts.index)
self.ts4 = self.ts3.astype('float32')
def pad(self, source_series, target_index):
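# asv runs this benchmark against historical pandas commits; the except
# branches here and in backfill() fall back to the older `fillMethod`
# keyword spelling that early pandas versions expected.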
try:
source_series.reindex(target_index, method='pad')
except:
source_series.reindex(target_index, fillMethod='pad')
def backfill(self, source_series, target_index):
try:
source_series.reindex(target_index, method='backfill')
except:
source_series.reindex(target_index, fillMethod='backfill')
def time_backfill_dates(self):
self.backfill(self.ts2, self.ts.index)
def time_pad_daterange(self):
self.pad(self.ts2, self.ts.index)
def time_backfill(self):
self.ts3.fillna(method='backfill')
def time_backfill_float32(self):
self.ts4.fillna(method='backfill')
def time_pad(self):
self.ts3.fillna(method='pad')
def time_pad_float32(self):
self.ts4.fillna(method='pad')
#----------------------------------------------------------------------
# align on level
class LevelAlign(object):
goal_time = 0.2
def setup(self):
self.index = MultiIndex(
levels=[np.arange(10), np.arange(100), np.arange(100)],
labels=[np.arange(10).repeat(10000),
np.tile(np.arange(100).repeat(100), 10),
np.tile(np.tile(np.arange(100), 100), 10)])
shuffle(self.index.values)
self.df = DataFrame(np.random.randn(len(self.index), 4),
index=self.index)
self.df_level = DataFrame(np.random.randn(100, 4),
index=self.index.levels[1])
def time_align_level(self):
self.df.align(self.df_level, level=1, copy=False)
def time_reindex_level(self):
self.df_level.reindex(self.df.index, level=1)
#----------------------------------------------------------------------
# drop_duplicates
class Duplicates(object):
goal_time = 0.2
def setup(self):
self.N = 10000
self.K = 10
self.key1 = tm.makeStringIndex(self.N).values.repeat(self.K)
self.key2 = tm.makeStringIndex(self.N).values.repeat(self.K)
self.df = DataFrame({'key1': self.key1, 'key2': self.key2,
'value': np.random.randn((self.N * self.K)),})
self.col_array_list = list(self.df.values.T)
self.df2 = self.df.copy()
self.df2.ix[:10000, :] = np.nan
self.s = Series(np.random.randint(0, 1000, size=10000))
self.s2 = Series(np.tile(tm.makeStringIndex(1000).values, 10))
np.random.seed(1234)
self.N = 1000000
self.K = 10000
self.key1 = np.random.randint(0, self.K, size=self.N)
self.df_int = DataFrame({'key1': self.key1})
self.df_bool = DataFrame({i: np.random.randint(0, 2, size=self.K,
dtype=bool)
for i in range(10)})
def time_frame_drop_dups(self):
self.df.drop_duplicates(['key1', 'key2'])
def time_frame_drop_dups_inplace(self):
self.df.drop_duplicates(['key1', 'key2'], inplace=True)
def time_frame_drop_dups_na(self):
self.df2.drop_duplicates(['key1', 'key2'])
def time_frame_drop_dups_na_inplace(self):
self.df2.drop_duplicates(['key1', 'key2'], inplace=True)
def time_series_drop_dups_int(self):
self.s.drop_duplicates()
def time_series_drop_dups_string(self):
self.s2.drop_duplicates()
def time_frame_drop_dups_int(self):
self.df_int.drop_duplicates()
def time_frame_drop_dups_bool(self):
self.df_bool.drop_duplicates()
#----------------------------------------------------------------------
# blog "pandas escaped the zoo"
class Align(object):
goal_time = 0.2
def setup(self):
n = 50000
indices = tm.makeStringIndex(n)
subsample_size = 40000
def sample(values, k):
sampler = np.arange(len(values))
shuffle(sampler)
return values.take(sampler[:k])
self.x = Series(np.random.randn(50000), indices)
self.y = Series(np.random.randn(subsample_size),
index=sample(indices, subsample_size))
def time_align_series_irregular_string(self):
(self.x + self.y)
class LibFastZip(object):
goal_time = 0.2
def setup(self):
self.N = 10000
self.K = 10
self.key1 = tm.makeStringIndex(self.N).values.repeat(self.K)
self.key2 = tm.makeStringIndex(self.N).values.repeat(self.K)
self.df = DataFrame({'key1': self.key1, 'key2': self.key2, 'value': np.random.randn((self.N * self.K)), })
self.col_array_list = list(self.df.values.T)
self.df2 = self.df.copy()
self.df2.ix[:10000, :] = np.nan
self.col_array_list2 = list(self.df2.values.T)
def time_lib_fast_zip(self):
lib.fast_zip(self.col_array_list)
def time_lib_fast_zip_fillna(self):
lib.fast_zip_fillna(self.col_array_list2)
| bsd-3-clause |
gem/geonode | geonode/groups/urls.py | 14 | 2422 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.conf.urls import patterns, url
from django.views.generic import TemplateView
from .views import GroupDetailView, GroupActivityView
urlpatterns = patterns('geonode.groups.views',
url(r'^$', TemplateView.as_view(template_name='groups/group_list.html'), name="group_list"),
url(r'^create/$', 'group_create', name="group_create"),
url(r'^group/(?P<slug>[-\w]+)/$', GroupDetailView.as_view(), name='group_detail'),
url(r'^group/(?P<slug>[-\w]+)/update/$', 'group_update', name='group_update'),
url(r'^group/(?P<slug>[-\w]+)/members/$', 'group_members', name='group_members'),
url(r'^group/(?P<slug>[-\w]+)/invite/$', 'group_invite', name='group_invite'),
url(r'^group/(?P<slug>[-\w]+)/members_add/$', 'group_members_add', name='group_members_add'),
url(r'^group/(?P<slug>[-\w]+)/member_remove/(?P<username>.+)$', 'group_member_remove',
name='group_member_remove'),
url(r'^group/(?P<slug>[-\w]+)/remove/$', 'group_remove', name='group_remove'),
url(r'^group/(?P<slug>[-\w]+)/join/$', 'group_join', name='group_join'),
url(r'^group/[-\w]+/invite/(?P<token>[\w]{40})/$', 'group_invite_response',
name='group_invite_response'),
url(r'^group/(?P<slug>[-\w]+)/activity/$', GroupActivityView.as_view(), name='group_activity'),
)
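# Hedged illustration of resolving one of these named routes (Django 1.x
# style; the slug is a placeholder and the prefix depends on where this
# urlconf is included):
#   from django.core.urlresolvers import reverse
#   reverse('group_detail', kwargs={'slug': 'my-group'})
#   # -> '.../group/my-group/'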
| gpl-3.0 |
Thoshh/wapad | lib/python2.7/site-packages/django/db/backends/sqlite3/base.py | 323 | 18115 | """
SQLite3 backend for django.
Works with either the pysqlite2 module or the sqlite3 module in the
standard library.
"""
from __future__ import unicode_literals
import datetime
import decimal
import re
import warnings
from django.conf import settings
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.base.validation import BaseDatabaseValidation
from django.utils import six, timezone
from django.utils.dateparse import (
parse_date, parse_datetime, parse_duration, parse_time,
)
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.safestring import SafeBytes
try:
import pytz
except ImportError:
pytz = None
try:
try:
from pysqlite2 import dbapi2 as Database
except ImportError:
from sqlite3 import dbapi2 as Database
except ImportError as exc:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading either pysqlite2 or sqlite3 modules (tried in that order): %s" % exc)
# Some of these import sqlite3, so import them after checking if it's installed.
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
def adapt_datetime_warn_on_aware_datetime(value):
# Remove this function and rely on the default adapter in Django 2.0.
if settings.USE_TZ and timezone.is_aware(value):
warnings.warn(
"The SQLite database adapter received an aware datetime (%s), "
"probably from cursor.execute(). Update your code to pass a "
"naive datetime in the database connection's time zone (UTC by "
"default).", RemovedInDjango20Warning)
# This doesn't account for the database connection's timezone,
# which isn't known. (That's why this adapter is deprecated.)
value = value.astimezone(timezone.utc).replace(tzinfo=None)
return value.isoformat(str(" "))
def decoder(conv_func):
""" The Python sqlite3 interface returns always byte strings.
This function converts the received value to a regular string before
passing it to the receiver function.
"""
return lambda s: conv_func(s.decode('utf-8'))
Database.register_converter(str("bool"), decoder(lambda s: s == '1'))
Database.register_converter(str("time"), decoder(parse_time))
Database.register_converter(str("date"), decoder(parse_date))
Database.register_converter(str("datetime"), decoder(parse_datetime))
Database.register_converter(str("timestamp"), decoder(parse_datetime))
Database.register_converter(str("TIMESTAMP"), decoder(parse_datetime))
Database.register_converter(str("decimal"), decoder(backend_utils.typecast_decimal))
Database.register_adapter(datetime.datetime, adapt_datetime_warn_on_aware_datetime)
Database.register_adapter(decimal.Decimal, backend_utils.rev_typecast_decimal)
if six.PY2:
Database.register_adapter(str, lambda s: s.decode('utf-8'))
Database.register_adapter(SafeBytes, lambda s: s.decode('utf-8'))
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'sqlite'
# SQLite doesn't actually support most of these types, but it "does the right
# thing" given more verbose field definitions, so leave them as is so that
# schema inspection is more useful.
data_types = {
'AutoField': 'integer',
'BinaryField': 'BLOB',
'BooleanField': 'bool',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'datetime',
'DecimalField': 'decimal',
'DurationField': 'bigint',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'real',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'char(15)',
'GenericIPAddressField': 'char(39)',
'NullBooleanField': 'bool',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer unsigned',
'PositiveSmallIntegerField': 'smallint unsigned',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
'UUIDField': 'char(32)',
}
data_types_suffix = {
'AutoField': 'AUTOINCREMENT',
}
# SQLite requires LIKE statements to include an ESCAPE clause if the value
# being escaped has a percent or underscore in it.
# See http://www.sqlite.org/lang_expr.html for an explanation.
operators = {
'exact': '= %s',
'iexact': "LIKE %s ESCAPE '\\'",
'contains': "LIKE %s ESCAPE '\\'",
'icontains': "LIKE %s ESCAPE '\\'",
'regex': 'REGEXP %s',
'iregex': "REGEXP '(?i)' || %s",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE %s ESCAPE '\\'",
'endswith': "LIKE %s ESCAPE '\\'",
'istartswith': "LIKE %s ESCAPE '\\'",
'iendswith': "LIKE %s ESCAPE '\\'",
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
pattern_ops = {
'contains': r"LIKE '%%' || {} || '%%' ESCAPE '\'",
'icontains': r"LIKE '%%' || UPPER({}) || '%%' ESCAPE '\'",
'startswith': r"LIKE {} || '%%' ESCAPE '\'",
'istartswith': r"LIKE UPPER({}) || '%%' ESCAPE '\'",
'endswith': r"LIKE '%%' || {} ESCAPE '\'",
'iendswith': r"LIKE '%%' || UPPER({}) ESCAPE '\'",
}
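# Illustrative rendering (assuming the lookup machinery substitutes the
# right-hand-side SQL into the '{}' slot, e.g. a plain parameter):
#   pattern_ops['startswith'].format('%s')
#   # -> "LIKE %s || '%%' ESCAPE '\'"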
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def get_connection_params(self):
settings_dict = self.settings_dict
if not settings_dict['NAME']:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
kwargs = {
'database': settings_dict['NAME'],
'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
}
kwargs.update(settings_dict['OPTIONS'])
# Always allow the underlying SQLite connection to be shareable
# between multiple threads. The safe-guarding will be handled at a
# higher level by the `BaseDatabaseWrapper.allow_thread_sharing`
# property. This is necessary as the shareability is disabled by
# default in pysqlite and it cannot be changed once a connection is
# opened.
if 'check_same_thread' in kwargs and kwargs['check_same_thread']:
warnings.warn(
'The `check_same_thread` option was provided and set to '
'True. It will be overridden with False. Use the '
'`DatabaseWrapper.allow_thread_sharing` property instead '
'for controlling thread shareability.',
RuntimeWarning
)
kwargs.update({'check_same_thread': False})
if self.features.can_share_in_memory_db:
kwargs.update({'uri': True})
return kwargs
def get_new_connection(self, conn_params):
conn = Database.connect(**conn_params)
conn.create_function("django_date_extract", 2, _sqlite_date_extract)
conn.create_function("django_date_trunc", 2, _sqlite_date_trunc)
conn.create_function("django_datetime_cast_date", 2, _sqlite_datetime_cast_date)
conn.create_function("django_datetime_extract", 3, _sqlite_datetime_extract)
conn.create_function("django_datetime_trunc", 3, _sqlite_datetime_trunc)
conn.create_function("django_time_extract", 2, _sqlite_time_extract)
conn.create_function("regexp", 2, _sqlite_regexp)
conn.create_function("django_format_dtdelta", 3, _sqlite_format_dtdelta)
conn.create_function("django_power", 2, _sqlite_power)
return conn
def init_connection_state(self):
pass
def create_cursor(self):
return self.connection.cursor(factory=SQLiteCursorWrapper)
def close(self):
self.validate_thread_sharing()
# If database is in memory, closing the connection destroys the
# database. To prevent accidental data loss, ignore close requests on
# an in-memory db.
if not self.is_in_memory_db(self.settings_dict['NAME']):
BaseDatabaseWrapper.close(self)
def _savepoint_allowed(self):
# Two conditions are required here:
# - A sufficiently recent version of SQLite to support savepoints,
# - Being in a transaction, which can only happen inside 'atomic'.
# When 'isolation_level' is not None, sqlite3 commits before each
# savepoint; it's a bug. When it is None, savepoints don't make sense
# because autocommit is enabled. The only exception is inside 'atomic'
# blocks. To work around that bug, on SQLite, 'atomic' starts a
# transaction explicitly rather than simply disable autocommit.
return self.features.uses_savepoints and self.in_atomic_block
def _set_autocommit(self, autocommit):
if autocommit:
level = None
else:
# sqlite3's internal default is ''. It's different from None.
# See Modules/_sqlite/connection.c.
level = ''
# 'isolation_level' is a misleading API.
# SQLite always runs at the SERIALIZABLE isolation level.
with self.wrap_database_errors:
self.connection.isolation_level = level
def check_constraints(self, table_names=None):
"""
Checks each table name in `table_names` for rows with invalid foreign
key references. This method is intended to be used in conjunction with
`disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint
checks were off.
Raises an IntegrityError on the first invalid foreign key reference
encountered (if any) and provides detailed information about the
invalid reference in the error message.
Backends can override this method if they can more directly apply
constraint checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE")
"""
cursor = self.cursor()
if table_names is None:
table_names = self.introspection.table_names(cursor)
for table_name in table_names:
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
if not primary_key_column_name:
continue
key_columns = self.introspection.get_key_columns(cursor, table_name)
for column_name, referenced_table_name, referenced_column_name in key_columns:
cursor.execute("""
SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
LEFT JOIN `%s` as REFERRED
ON (REFERRING.`%s` = REFERRED.`%s`)
WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL"""
% (primary_key_column_name, column_name, table_name, referenced_table_name,
column_name, referenced_column_name, column_name, referenced_column_name))
for bad_row in cursor.fetchall():
raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid "
"foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
% (table_name, bad_row[0], table_name, column_name, bad_row[1],
referenced_table_name, referenced_column_name))
def is_usable(self):
return True
def _start_transaction_under_autocommit(self):
"""
Start a transaction explicitly in autocommit mode.
Staying in autocommit mode works around a bug of sqlite3 that breaks
savepoints when autocommit is disabled.
"""
self.cursor().execute("BEGIN")
def is_in_memory_db(self, name):
return name == ":memory:" or "mode=memory" in force_text(name)
FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')
class SQLiteCursorWrapper(Database.Cursor):
"""
Django uses "format" style placeholders, but pysqlite2 uses "qmark" style.
This fixes it -- but note that if you want to use a literal "%s" in a query,
you'll need to use "%%s".
"""
def execute(self, query, params=None):
if params is None:
return Database.Cursor.execute(self, query)
query = self.convert_query(query)
return Database.Cursor.execute(self, query, params)
def executemany(self, query, param_list):
query = self.convert_query(query)
return Database.Cursor.executemany(self, query, param_list)
def convert_query(self, query):
return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%')
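# e.g. convert_query("SELECT * FROM t WHERE a = %s AND b LIKE '10%%'")
# returns "SELECT * FROM t WHERE a = ? AND b LIKE '10%'"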
def _sqlite_date_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
else:
return getattr(dt, lookup_type)
def _sqlite_date_trunc(lookup_type, dt):
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'year':
return "%i-01-01" % dt.year
elif lookup_type == 'month':
return "%i-%02i-01" % (dt.year, dt.month)
elif lookup_type == 'day':
return "%i-%02i-%02i" % (dt.year, dt.month, dt.day)
def _sqlite_datetime_parse(dt, tzname):
if dt is None:
return None
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if tzname is not None:
dt = timezone.localtime(dt, pytz.timezone(tzname))
return dt
def _sqlite_datetime_cast_date(dt, tzname):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
return dt.date().isoformat()
def _sqlite_datetime_extract(lookup_type, dt, tzname):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
else:
return getattr(dt, lookup_type)
def _sqlite_datetime_trunc(lookup_type, dt, tzname):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
if lookup_type == 'year':
return "%i-01-01 00:00:00" % dt.year
elif lookup_type == 'month':
return "%i-%02i-01 00:00:00" % (dt.year, dt.month)
elif lookup_type == 'day':
return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
elif lookup_type == 'hour':
return "%i-%02i-%02i %02i:00:00" % (dt.year, dt.month, dt.day, dt.hour)
elif lookup_type == 'minute':
return "%i-%02i-%02i %02i:%02i:00" % (dt.year, dt.month, dt.day, dt.hour, dt.minute)
elif lookup_type == 'second':
return "%i-%02i-%02i %02i:%02i:%02i" % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
def _sqlite_time_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = backend_utils.typecast_time(dt)
except (ValueError, TypeError):
return None
return getattr(dt, lookup_type)
def _sqlite_format_dtdelta(conn, lhs, rhs):
"""
LHS and RHS can be either:
- An integer number of microseconds
- A string representing a timedelta object
- A string representing a datetime
"""
try:
if isinstance(lhs, six.integer_types):
lhs = str(decimal.Decimal(lhs) / decimal.Decimal(1000000))
real_lhs = parse_duration(lhs)
if real_lhs is None:
real_lhs = backend_utils.typecast_timestamp(lhs)
if isinstance(rhs, six.integer_types):
rhs = str(decimal.Decimal(rhs) / decimal.Decimal(1000000))
real_rhs = parse_duration(rhs)
if real_rhs is None:
real_rhs = backend_utils.typecast_timestamp(rhs)
if conn.strip() == '+':
out = real_lhs + real_rhs
else:
out = real_lhs - real_rhs
except (ValueError, TypeError):
return None
# typecast_timestamp returns a date or a datetime without timezone.
# It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]"
return str(out)
def _sqlite_regexp(re_pattern, re_string):
return bool(re.search(re_pattern, force_text(re_string))) if re_string is not None else False
def _sqlite_power(x, y):
return x ** y
| mit |
liangazhou/django-rdp | packages/Django-1.8.6/django/contrib/sessions/backends/base.py | 99 | 11451 | from __future__ import unicode_literals
import base64
import logging
import string
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.sessions.exceptions import SuspiciousSession
from django.core.exceptions import SuspiciousOperation
from django.utils import timezone
from django.utils.crypto import (
constant_time_compare, get_random_string, salted_hmac,
)
from django.utils.encoding import force_bytes, force_text
from django.utils.module_loading import import_string
# session_key should not be case sensitive because some backends can store it
# on case insensitive file systems.
VALID_KEY_CHARS = string.ascii_lowercase + string.digits
class CreateError(Exception):
"""
Used internally as a consistent exception type to catch from save (see the
docstring for SessionBase.save() for details).
"""
pass
class SessionBase(object):
"""
Base class for all Session classes.
"""
TEST_COOKIE_NAME = 'testcookie'
TEST_COOKIE_VALUE = 'worked'
def __init__(self, session_key=None):
self._session_key = session_key
self.accessed = False
self.modified = False
self.serializer = import_string(settings.SESSION_SERIALIZER)
def __contains__(self, key):
return key in self._session
def __getitem__(self, key):
return self._session[key]
def __setitem__(self, key, value):
self._session[key] = value
self.modified = True
def __delitem__(self, key):
del self._session[key]
self.modified = True
def get(self, key, default=None):
return self._session.get(key, default)
def pop(self, key, *args):
self.modified = self.modified or key in self._session
return self._session.pop(key, *args)
def setdefault(self, key, value):
if key in self._session:
return self._session[key]
else:
self.modified = True
self._session[key] = value
return value
def set_test_cookie(self):
self[self.TEST_COOKIE_NAME] = self.TEST_COOKIE_VALUE
def test_cookie_worked(self):
return self.get(self.TEST_COOKIE_NAME) == self.TEST_COOKIE_VALUE
def delete_test_cookie(self):
del self[self.TEST_COOKIE_NAME]
def _hash(self, value):
key_salt = "django.contrib.sessions" + self.__class__.__name__
return salted_hmac(key_salt, value).hexdigest()
def encode(self, session_dict):
"Returns the given session dictionary serialized and encoded as a string."
serialized = self.serializer().dumps(session_dict)
hash = self._hash(serialized)
return base64.b64encode(hash.encode() + b":" + serialized).decode('ascii')
def decode(self, session_data):
encoded_data = base64.b64decode(force_bytes(session_data))
try:
# could produce ValueError if there is no ':'
hash, serialized = encoded_data.split(b':', 1)
expected_hash = self._hash(serialized)
if not constant_time_compare(hash.decode(), expected_hash):
raise SuspiciousSession("Session data corrupted")
else:
return self.serializer().loads(serialized)
except Exception as e:
# ValueError, SuspiciousOperation, unpickling exceptions. If any of
# these happen, just return an empty dictionary (an empty session).
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' %
e.__class__.__name__)
logger.warning(force_text(e))
return {}
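# Illustrative round-trip of the scheme above (hypothetical values; a
# concrete backend subclass and configured settings are assumed):
#   payload = session.encode({'user_id': 42})
#   # -> base64 of b"<hmac hexdigest>:<serialized dict>"
#   session.decode(payload)  # -> {'user_id': 42}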
def update(self, dict_):
self._session.update(dict_)
self.modified = True
def has_key(self, key):
return key in self._session
def keys(self):
return self._session.keys()
def values(self):
return self._session.values()
def items(self):
return self._session.items()
def iterkeys(self):
return self._session.iterkeys()
def itervalues(self):
return self._session.itervalues()
def iteritems(self):
return self._session.iteritems()
def clear(self):
# To avoid unnecessary persistent storage accesses, we set up the
# internals directly (loading data wastes time, since we are going to
# set it to an empty dict anyway).
self._session_cache = {}
self.accessed = True
self.modified = True
def is_empty(self):
"Returns True when there is no session_key and the session is empty"
try:
return not bool(self._session_key) and not self._session_cache
except AttributeError:
return True
def _get_new_session_key(self):
"Returns session key that isn't being used."
while True:
session_key = get_random_string(32, VALID_KEY_CHARS)
if not self.exists(session_key):
break
return session_key
def _get_or_create_session_key(self):
if self._session_key is None:
self._session_key = self._get_new_session_key()
return self._session_key
def _get_session_key(self):
return self._session_key
session_key = property(_get_session_key)
def _get_session(self, no_load=False):
"""
Lazily loads session from storage (unless "no_load" is True, when only
an empty dict is stored) and stores it in the current instance.
"""
self.accessed = True
try:
return self._session_cache
except AttributeError:
if self.session_key is None or no_load:
self._session_cache = {}
else:
self._session_cache = self.load()
return self._session_cache
_session = property(_get_session)
def get_expiry_age(self, **kwargs):
"""Get the number of seconds until the session expires.
Optionally, this function accepts `modification` and `expiry` keyword
arguments specifying the modification and expiry of the session.
"""
try:
modification = kwargs['modification']
except KeyError:
modification = timezone.now()
# Make the difference between "expiry=None passed in kwargs" and
# "expiry not passed in kwargs", in order to guarantee not to trigger
# self.load() when expiry is provided.
try:
expiry = kwargs['expiry']
except KeyError:
expiry = self.get('_session_expiry')
if not expiry: # Checks both None and 0 cases
return settings.SESSION_COOKIE_AGE
if not isinstance(expiry, datetime):
return expiry
delta = expiry - modification
return delta.days * 86400 + delta.seconds
def get_expiry_date(self, **kwargs):
"""Get session the expiry date (as a datetime object).
Optionally, this function accepts `modification` and `expiry` keyword
arguments specifying the modification and expiry of the session.
"""
try:
modification = kwargs['modification']
except KeyError:
modification = timezone.now()
# Same comment as in get_expiry_age
try:
expiry = kwargs['expiry']
except KeyError:
expiry = self.get('_session_expiry')
if isinstance(expiry, datetime):
return expiry
if not expiry: # Checks both None and 0 cases
expiry = settings.SESSION_COOKIE_AGE
return modification + timedelta(seconds=expiry)
def set_expiry(self, value):
"""
Sets a custom expiration for the session. ``value`` can be an integer,
a Python ``datetime`` or ``timedelta`` object or ``None``.
If ``value`` is an integer, the session will expire after that many
seconds of inactivity. If set to ``0`` then the session will expire on
browser close.
If ``value`` is a ``datetime`` or ``timedelta`` object, the session
will expire at that specific future time.
If ``value`` is ``None``, the session uses the global session expiry
policy.
"""
if value is None:
# Remove any custom expiration for this session.
try:
del self['_session_expiry']
except KeyError:
pass
return
if isinstance(value, timedelta):
value = timezone.now() + value
self['_session_expiry'] = value
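# Typical calls on a request's session (values are illustrative):
#   request.session.set_expiry(300)                # idle timeout of 300s
#   request.session.set_expiry(0)                  # expire on browser close
#   request.session.set_expiry(timedelta(days=1))  # absolute expiry
#   request.session.set_expiry(None)               # global expiry policy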
def get_expire_at_browser_close(self):
"""
Returns ``True`` if the session is set to expire when the browser
closes, and ``False`` if there's an expiry date. Use
``get_expiry_date()`` or ``get_expiry_age()`` to find the actual expiry
date/age, if there is one.
"""
if self.get('_session_expiry') is None:
return settings.SESSION_EXPIRE_AT_BROWSER_CLOSE
return self.get('_session_expiry') == 0
def flush(self):
"""
Removes the current session data from the database and regenerates the
key.
"""
self.clear()
self.delete()
self._session_key = None
def cycle_key(self):
"""
Creates a new session key, whilst retaining the current session data.
"""
data = self._session_cache
key = self.session_key
self.create()
self._session_cache = data
self.delete(key)
# Methods that child classes must implement.
def exists(self, session_key):
"""
Returns True if the given session_key already exists.
"""
raise NotImplementedError('subclasses of SessionBase must provide an exists() method')
def create(self):
"""
Creates a new session instance. Guaranteed to create a new object with
a unique key and will have saved the result once (with empty data)
before the method returns.
"""
raise NotImplementedError('subclasses of SessionBase must provide a create() method')
def save(self, must_create=False):
"""
Saves the session data. If 'must_create' is True, a new session object
is created (otherwise a CreateError exception is raised). Otherwise,
save() can update an existing object with the same key.
"""
raise NotImplementedError('subclasses of SessionBase must provide a save() method')
def delete(self, session_key=None):
"""
Deletes the session data under this key. If the key is None, the
current session key value is used.
"""
raise NotImplementedError('subclasses of SessionBase must provide a delete() method')
def load(self):
"""
Loads the session data and returns a dictionary.
"""
raise NotImplementedError('subclasses of SessionBase must provide a load() method')
@classmethod
def clear_expired(cls):
"""
Remove expired sessions from the session store.
If this operation isn't possible on a given backend, it should raise
NotImplementedError. If it isn't necessary, because the backend has
a built-in expiration mechanism, it should be a no-op.
"""
raise NotImplementedError('This backend does not support clear_expired().')
| apache-2.0 |
gVallverdu/pymatgen | pymatgen/util/provenance.py | 2 | 14104 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Classes and methods related to the Structure Notation Language (SNL)
"""
import sys
import re
import datetime
from collections import namedtuple
import json
from io import StringIO
from monty.json import MontyDecoder, MontyEncoder
from monty.string import remove_non_ascii
from pymatgen.core.structure import Structure, Molecule
from pybtex.database.input import bibtex
from pybtex import errors
__author__ = 'Anubhav Jain, Shyue Ping Ong'
__credits__ = 'Dan Gunter'
__copyright__ = 'Copyright 2013, The Materials Project'
__version__ = '0.1'
__maintainer__ = 'Anubhav Jain'
__email__ = 'ajain@lbl.gov'
__date__ = 'Feb 11, 2013'
MAX_HNODE_SIZE = 64000 # maximum size (bytes) of SNL HistoryNode
MAX_DATA_SIZE = 256000 # maximum size (bytes) of SNL data field
MAX_HNODES = 100 # maximum number of HistoryNodes in SNL file
MAX_BIBTEX_CHARS = 20000 # maximum number of characters for BibTeX reference
def is_valid_bibtex(reference):
"""
Use pybtex to validate that a reference is in proper BibTeX format
Args:
reference: A String reference in BibTeX format.
Returns:
Boolean indicating if reference is valid bibtex.
"""
# str is necessary since pybtex seems to have an issue with unicode. The
# filter expression removes all non-ASCII characters.
sio = StringIO(remove_non_ascii(reference))
parser = bibtex.Parser()
errors.set_strict_mode(False)
bib_data = parser.parse_stream(sio)
return len(bib_data.entries) > 0
class HistoryNode(namedtuple('HistoryNode', ['name', 'url', 'description'])):
"""
A HistoryNode represents a step in the chain of events that lead to a
Structure. HistoryNodes leave 'breadcrumbs' so that you can trace back how
a Structure was created. For example, a HistoryNode might represent pulling
a Structure from an external database such as the ICSD or CSD. Or, it might
represent the application of a code (e.g. pymatgen) to the Structure, with
a custom description of how that code was applied (e.g. a site removal
Transformation was applied).
A HistoryNode contains three fields:
.. attribute:: name
The name of a code or resource that this Structure encountered in
its history (String)
.. attribute:: url
The URL of that code/resource (String)
.. attribute:: description
A free-form description of how the code/resource is related to the
Structure (dict).
"""
def as_dict(self):
"""
Returns: Dict
"""
return {"name": self.name, "url": self.url,
"description": self.description}
@staticmethod
def from_dict(h_node):
"""
Args:
d (dict): Dict representation
Returns:
HistoryNode
"""
return HistoryNode(h_node['name'], h_node['url'],
h_node['description'])
@staticmethod
def parse_history_node(h_node):
"""
Parses a History Node object from either a dict or a tuple.
Args:
h_node: A dict with name/url/description fields or a 3-element
tuple.
Returns:
History node.
"""
if isinstance(h_node, dict):
return HistoryNode.from_dict(h_node)
else:
if len(h_node) != 3:
raise ValueError("Invalid History node, "
"should be dict or (name, version, "
"description) tuple: {}".format(h_node))
return HistoryNode(h_node[0], h_node[1], h_node[2])
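# Both forms below parse to the same node (values are illustrative):
#   HistoryNode.parse_history_node(
#       ('pymatgen', 'https://pymatgen.org', {'action': 'substitute'}))
#   HistoryNode.parse_history_node(
#       {'name': 'pymatgen', 'url': 'https://pymatgen.org',
#        'description': {'action': 'substitute'}})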
class Author(namedtuple('Author', ['name', 'email'])):
"""
An Author contains two fields:
.. attribute:: name
Name of author (String)
.. attribute:: email
Email of author (String)
"""
def __str__(self):
"""
String representation of an Author
"""
return '{} <{}>'.format(self.name, self.email)
def as_dict(self):
"""
Returns: MSONable dict.
"""
return {"name": self.name, "email": self.email}
@staticmethod
def from_dict(d):
"""
Args:
d (dict): Dict representation
Returns:
Author
"""
return Author(d['name'], d['email'])
@staticmethod
def parse_author(author):
"""
Parses an Author object from either a String, dict, or tuple
Args:
author: A String formatted as "NAME <email@domain.com>",
(name, email) tuple, or a dict with name and email keys.
Returns:
An Author object.
"""
if isinstance(author, str):
# Regex looks for whitespace, (any name), whitespace, <, (email),
# >, whitespace
m = re.match(r'\s*(.*?)\s*<(.*?@.*?)>\s*', author)
if not m or m.start() != 0 or m.end() != len(author):
raise ValueError("Invalid author format! {}".format(author))
return Author(m.groups()[0], m.groups()[1])
elif isinstance(author, dict):
return Author.from_dict(author)
else:
if len(author) != 2:
raise ValueError("Invalid author, should be String or (name, "
"email) tuple: {}".format(author))
return Author(author[0], author[1])
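# All three inputs below parse to
# Author(name='Jane Doe', email='jane@example.com') (illustrative values):
#   Author.parse_author('Jane Doe <jane@example.com>')
#   Author.parse_author(('Jane Doe', 'jane@example.com'))
#   Author.parse_author({'name': 'Jane Doe', 'email': 'jane@example.com'})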
class StructureNL:
"""
The Structure Notation Language (SNL, pronounced 'snail') is a container
for a pymatgen Structure/Molecule object with some additional fields for
enhanced provenance. It is meant to be imported/exported in a JSON file
format with the following structure:
- about
- created_at
- authors
- projects
- references
- remarks
- data
- history
- lattice (optional)
- sites
"""
def __init__(self, struct_or_mol, authors, projects=None, references='',
remarks=None, data=None, history=None, created_at=None):
"""
Args:
struct_or_mol: A pymatgen.core.structure Structure/Molecule object
authors: *List* of {"name":'', "email":''} dicts,
*list* of Strings as 'John Doe <johndoe@gmail.com>',
or a single String with commas separating authors
projects: List of Strings ['Project A', 'Project B']
references: A String in BibTeX format
remarks: List of Strings ['Remark A', 'Remark B']
data: A free form dict. Namespaced at the root level with an
underscore, e.g. {"_materialsproject": <custom data>}
history: List of dicts - [{'name':'', 'url':'', 'description':{}}]
created_at: A datetime object
"""
# initialize root-level structure keys
self.structure = struct_or_mol
# turn authors into list of Author objects
authors = authors.split(',')\
if isinstance(authors, str) else authors
self.authors = [Author.parse_author(a) for a in authors]
# turn projects into list of Strings
projects = projects if projects else []
self.projects = [projects] if isinstance(projects, str) else projects
# check that references are valid BibTeX
if not isinstance(references, str):
raise ValueError("Invalid format for SNL reference! Should be "
"empty string or BibTeX string.")
if references and not is_valid_bibtex(references):
raise ValueError("Invalid format for SNL reference! Should be "
"BibTeX string.")
if len(references) > MAX_BIBTEX_CHARS:
raise ValueError("The BibTeX string must be fewer than {} chars "
", you have {}"
.format(MAX_BIBTEX_CHARS, len(references)))
self.references = references
# turn remarks into list of Strings
remarks = remarks if remarks else []
self.remarks = [remarks] if isinstance(remarks, str) else remarks
# check remarks limit
for r in self.remarks:
if len(r) > 140:
raise ValueError("The remark exceeds the maximum size of"
"140 characters: {}".format(r))
# check data limit
self.data = data if data else {}
if not sys.getsizeof(self.data) < MAX_DATA_SIZE:
raise ValueError("The data dict exceeds the maximum size limit of"
" {} bytes (you have {})"
.format(MAX_DATA_SIZE, sys.getsizeof(data)))
for k, v in self.data.items():
if not k.startswith("_"):
raise ValueError("data must contain properly namespaced data "
"with keys starting with an underscore. The "
"key {} does not start with an underscore.",
format(k))
# check for valid history nodes
history = history if history else [] # initialize null fields
if len(history) > MAX_HNODES:
raise ValueError("A maximum of {} History nodes are supported, "
"you have {}!".format(MAX_HNODES, len(history)))
self.history = [HistoryNode.parse_history_node(h) for h in history]
if not all([sys.getsizeof(h) < MAX_HNODE_SIZE for h in history]):
raise ValueError("One or more history nodes exceeds the maximum "
"size limit of {} bytes".format(MAX_HNODE_SIZE))
self.created_at = created_at if created_at \
else datetime.datetime.utcnow()
def as_dict(self):
"""
Returns: MSONable dict
"""
d = self.structure.as_dict()
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
d["about"] = {"authors": [a.as_dict() for a in self.authors],
"projects": self.projects,
"references": self.references,
"remarks": self.remarks,
"history": [h.as_dict() for h in self.history],
"created_at": json.loads(json.dumps(self.created_at,
cls=MontyEncoder))}
d["about"].update(json.loads(json.dumps(self.data,
cls=MontyEncoder)))
return d
@classmethod
def from_dict(cls, d):
"""
Args:
d (dict): Dict representation
Returns:
Class
"""
a = d["about"]
dec = MontyDecoder()
created_at = dec.process_decoded(a.get("created_at"))
data = {k: v for k, v in d["about"].items()
if k.startswith("_")}
data = dec.process_decoded(data)
structure = Structure.from_dict(d) if "lattice" in d \
else Molecule.from_dict(d)
return cls(structure, a["authors"], projects=a.get("projects", None),
references=a.get("references", ""),
remarks=a.get("remarks", None), data=data,
history=a.get("history", None), created_at=created_at)
@classmethod
def from_structures(cls, structures, authors, projects=None,
references='', remarks=None, data=None,
histories=None, created_at=None):
"""
A convenience method for getting a list of StructureNL objects by
specifying structures and metadata separately. Some of the metadata
is applied to all of the structures for ease of use.
Args:
structures: A list of Structure objects
authors: *List* of {"name":'', "email":''} dicts,
*list* of Strings as 'John Doe <johndoe@gmail.com>',
or a single String with commas separating authors
projects: List of Strings ['Project A', 'Project B']. This
applies to all structures.
references: A String in BibTeX format. Again, this applies to all
structures.
remarks: List of Strings ['Remark A', 'Remark B']
data: A list of free form dict. Namespaced at the root level
with an underscore, e.g. {"_materialsproject":<custom data>}
. The length of data should be the same as the list of
structures if not None.
histories: List of list of dicts - [[{'name':'', 'url':'',
'description':{}}], ...] The length of histories should be the
same as the list of structures if not None.
created_at: A datetime object
"""
data = [{}] * len(structures) if data is None else data
histories = [[]] * len(structures) if histories is None else \
histories
snl_list = []
for i, struct in enumerate(structures):
snl = StructureNL(struct, authors, projects=projects,
references=references,
remarks=remarks, data=data[i],
history=histories[i],
created_at=created_at)
snl_list.append(snl)
return snl_list
def __str__(self):
return "\n".join(["{}\n{}".format(k, getattr(self, k))
for k in ("structure", "authors", "projects",
"references", "remarks", "data", "history",
"created_at")])
def __eq__(self, other):
return all(map(lambda n: getattr(self, n) == getattr(other, n),
("structure", "authors", "projects", "references",
"remarks", "data", "history", "created_at")))
def __ne__(self, other):
return not self.__eq__(other)
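# Minimal construction sketch (hedged; the structure and author below are
# placeholders, and pymatgen/pybtex must be importable):
#   from pymatgen.core.lattice import Lattice
#   s = Structure(Lattice.cubic(4.2), ['Na', 'Cl'],
#                 [[0, 0, 0], [0.5, 0.5, 0.5]])
#   snl = StructureNL(s, 'Jane Doe <jane@example.com>',
#                     remarks=['illustrative entry'])
#   d = snl.as_dict()
#   snl2 = StructureNL.from_dict(d)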
| mit |
percipient/datadogpy | datadog/dogstatsd/base.py | 3 | 9678 | #!/usr/bin/env python
"""
DogStatsd is a Python client for DogStatsd, Datadog's fork of StatsD.
"""
import logging
from random import random
from time import time
import socket
from functools import wraps
try:
from itertools import imap
except ImportError:
imap = map
log = logging.getLogger('dogstatsd')
class DogStatsd(object):
OK, WARNING, CRITICAL, UNKNOWN = (0, 1, 2, 3)
def __init__(self, host='localhost', port=8125, max_buffer_size=50):
"""
Initialize a DogStatsd object.
>>> statsd = DogStatsd()
:param host: the host of the DogStatsd server.
:param port: the port of the DogStatsd server.
:param max_buffer_size: Maximum number of metrics to buffer before sending to the server
if sending metrics in batch
"""
self.host = host
self.port = int(port)
self.socket = None
self.max_buffer_size = max_buffer_size
self._send = self._send_to_server
self.encoding = 'utf-8'
def __enter__(self):
self.open_buffer(self.max_buffer_size)
return self
def __exit__(self, type, value, traceback):
self.close_buffer()
def get_socket(self):
"""
Return a connected socket.
Note: connect the socket before assigning it to the class instance to
avoid bad thread race conditions.
"""
if not self.socket:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect((self.host, self.port))
self.socket = sock
return self.socket
def open_buffer(self, max_buffer_size=50):
"""
Open a buffer to send a batch of metrics in one packet.
You can also use this as a context manager.
>>> with DogStatsd() as batch:
>>> batch.gauge('users.online', 123)
>>> batch.gauge('active.connections', 1001)
"""
self.max_buffer_size = max_buffer_size
self.buffer = []
self._send = self._send_to_buffer
def close_buffer(self):
"""
Flush the buffer and switch back to single metric packets.
"""
self._send = self._send_to_server
self._flush_buffer()
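# Manual-batching sketch, equivalent to the `with DogStatsd() as batch:` form
# (metric names are illustrative):
#
#   statsd.open_buffer(max_buffer_size=25)
#   statsd.increment('page.views')
#   statsd.gauge('users.online', 123)
#   statsd.close_buffer()  # flushes any buffered metrics in one packet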
def gauge(self, metric, value, tags=None, sample_rate=1):
"""
Record the value of a gauge, optionally setting a list of tags and a
sample rate.
>>> statsd.gauge('users.online', 123)
>>> statsd.gauge('active.connections', 1001, tags=["protocol:http"])
"""
return self._report(metric, 'g', value, tags, sample_rate)
def increment(self, metric, value=1, tags=None, sample_rate=1):
"""
Increment a counter, optionally setting a value, tags and a sample
rate.
>>> statsd.increment('page.views')
>>> statsd.increment('files.transferred', 124)
"""
self._report(metric, 'c', value, tags, sample_rate)
def decrement(self, metric, value=1, tags=None, sample_rate=1):
"""
Decrement a counter, optionally setting a value, tags and a sample
rate.
>>> statsd.decrement('files.remaining')
>>> statsd.decrement('active.connections', 2)
"""
self._report(metric, 'c', -value, tags, sample_rate)
def histogram(self, metric, value, tags=None, sample_rate=1):
"""
Sample a histogram value, optionally setting tags and a sample rate.
>>> statsd.histogram('uploaded.file.size', 1445)
>>> statsd.histogram('album.photo.count', 26, tags=["gender:female"])
"""
self._report(metric, 'h', value, tags, sample_rate)
def timing(self, metric, value, tags=None, sample_rate=1):
"""
Record a timing, optionally setting tags and a sample rate.
>>> statsd.timing("query.response.time", 1234)
"""
self._report(metric, 'ms', value, tags, sample_rate)
class _TimedContextManagerDecorator(object):
"""
A context manager and a decorator which will report the elapsed time in
the context OR in a function call.
"""
def __init__(self, statsd, metric, tags=None, sample_rate=1):
self.statsd = statsd
self.metric = metric
self.tags = tags
self.sample_rate = sample_rate
def __call__(self, func):
"""Decorator which returns the elapsed time of the function call."""
@wraps(func)
def wrapped(*args, **kwargs):
with self:
return func(*args, **kwargs)
return wrapped
def __enter__(self):
self.start = time()
# Return self so `with statsd.timed(...) as t:` binds the manager rather
# than None.
return self
def __exit__(self, type, value, traceback):
# Report the elapsed time of the context manager.
self.statsd.timing(self.metric, time() - self.start,
self.tags, self.sample_rate)
def timed(self, metric, tags=None, sample_rate=1):
"""
A decorator or context manager that will measure the distribution of a
function's/context's run time. Optionally specify a list of tags or a
sample rate.
::
@statsd.timed('user.query.time', sample_rate=0.5)
def get_user(user_id):
# Do what you need to ...
pass
# Is equivalent to ...
with statsd.timed('user.query.time', sample_rate=0.5):
# Do what you need to ...
pass
# Is equivalent to ...
start = time.time()
try:
get_user(user_id)
finally:
statsd.timing('user.query.time', time.time() - start)
"""
return self._TimedContextManagerDecorator(self, metric, tags, sample_rate)
def set(self, metric, value, tags=None, sample_rate=1):
"""
Sample a set value.
>>> statsd.set('visitors.uniques', 999)
"""
self._report(metric, 's', value, tags, sample_rate)
def _report(self, metric, metric_type, value, tags, sample_rate):
if sample_rate != 1 and random() > sample_rate:
return
payload = [metric, ":", value, "|", metric_type]
if sample_rate != 1:
payload.extend(["|@", sample_rate])
if tags:
payload.extend(["|#", ",".join(tags)])
encoded = "".join(imap(str, payload))
self._send(encoded)
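# Wire-format sketch: a call such as
#   statsd._report('page.views', 'c', 1, tags=['env:prod'], sample_rate=0.5)
# would (when not dropped by sampling) send the datagram
#   'page.views:1|c|@0.5|#env:prod'
# (metric name and tag are illustrative).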
def _send_to_server(self, packet):
try:
# If set, use socket directly
(self.socket or self.get_socket()).send(packet.encode(self.encoding))
except socket.error:
log.info("Error submitting packet, will try refreshing the socket")
self.socket = None
try:
self.get_socket().send(packet.encode(self.encoding))
except socket.error:
log.exception("Failed to send packet with a newly binded socket")
def _send_to_buffer(self, packet):
self.buffer.append(packet)
if len(self.buffer) >= self.max_buffer_size:
self._flush_buffer()
def _flush_buffer(self):
self._send_to_server("\n".join(self.buffer))
self.buffer = []
def _escape_event_content(self, string):
return string.replace('\n', '\\n')
def _escape_service_check_message(self, string):
return string.replace('\n', '\\n').replace('m:', 'm\\:')
def event(self, title, text, alert_type=None, aggregation_key=None,
source_type_name=None, date_happened=None, priority=None,
tags=None, hostname=None):
"""
Send an event. Attributes are the same as the Event API.
http://docs.datadoghq.com/api/
>>> statsd.event('Man down!', 'This server needs assistance.')
>>> statsd.event('The web server restarted', 'The web server is up again', alert_type='success') # NOQA
"""
title = self._escape_event_content(title)
text = self._escape_event_content(text)
string = u'_e{%d,%d}:%s|%s' % (len(title), len(text), title, text)
if date_happened:
string = '%s|d:%d' % (string, date_happened)
if hostname:
string = '%s|h:%s' % (string, hostname)
if aggregation_key:
string = '%s|k:%s' % (string, aggregation_key)
if priority:
string = '%s|p:%s' % (string, priority)
if source_type_name:
string = '%s|s:%s' % (string, source_type_name)
if alert_type:
string = '%s|t:%s' % (string, alert_type)
if tags:
string = '%s|#%s' % (string, ','.join(tags))
if len(string) > 8 * 1024:
raise Exception(u'Event "%s" payload is too big (more than 8KB), '
'event discarded' % title)
self._send(string)
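# Encoding sketch: statsd.event('Man down!', 'This server needs assistance.')
# sends roughly '_e{9,29}:Man down!|This server needs assistance.'
# (the braces carry the title/text lengths; the optional fields append
# suffixes such as |p:, |t: and |#tag1,tag2).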
def service_check(self, check_name, status, tags=None, timestamp=None,
hostname=None, message=None):
"""
Send a service check run.
>>> statsd.service_check('my_service.check_name', DogStatsd.WARNING)
"""
message = self._escape_service_check_message(message) if message is not None else ''
string = u'_sc|{0}|{1}'.format(check_name, status)
if timestamp:
string = u'{0}|d:{1}'.format(string, timestamp)
if hostname:
string = u'{0}|h:{1}'.format(string, hostname)
if tags:
string = u'{0}|#{1}'.format(string, ','.join(tags))
if message:
string = u'{0}|m:{1}'.format(string, message)
self._send(string)
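# Encoding sketch (check name and message are illustrative):
#   statsd.service_check('my_service.check_name', DogStatsd.WARNING,
#                        message='degraded')
# sends roughly '_sc|my_service.check_name|1|m:degraded'.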
statsd = DogStatsd()
| bsd-3-clause |
miguelparaiso/PracticaOdoo | addons/membership/__openerp__.py | 197 | 2207 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Membership Management',
'version': '0.1',
'category': 'Association',
'description': """
This module allows you to manage all operations for managing memberships.
=========================================================================
It supports different kinds of members:
--------------------------------------
* Free member
* Associated member (e.g.: a group subscribes to a membership for all subsidiaries)
* Paid members
* Special member prices
It is integrated with sales and accounting to allow you to automatically
invoice and send proposals for membership renewal.
""",
'author': 'OpenERP SA',
'depends': ['base', 'product', 'account'],
'data': [
'security/ir.model.access.csv',
'wizard/membership_invoice_view.xml',
'membership_data.xml',
'membership_view.xml',
'report/report_membership_view.xml',
],
'demo': [
'membership_demo.xml',
'membership_demo.yml'
],
'website': 'https://www.odoo.com/page/community-builder',
'test': ['test/test_membership.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
pwaldo2/AWPUG-Plone-Demo | src/awpug.content/PasteScript-1.7.5-py2.7.egg/paste/script/help.py | 6 | 1943 | # (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
from command import Command, get_commands
from command import parser as base_parser
class HelpCommand(Command):
summary = "Display help"
usage = '[COMMAND]'
max_args = 1
parser = Command.standard_parser()
def command(self):
if not self.args:
self.generic_help()
return
name = self.args[0]
commands = get_commands()
if name not in commands:
print 'No such command: %s' % name
self.generic_help()
return
command = commands[name].load()
runner = command(name)
runner.run(['-h'])
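# e.g. a hypothetical `paster help create` loads the 'create' command and
# re-runs it with '-h', so the command's own option parser prints the
# detailed usage.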
def generic_help(self):
base_parser.print_help()
print
commands_grouped = {}
commands = get_commands()
longest = max([len(n) for n in commands.keys()])
for name, command in commands.items():
try:
command = command.load()
except Exception, e:
print 'Cannot load command %s: %s' % (name, e)
continue
if getattr(command, 'hidden', False):
continue
commands_grouped.setdefault(
command.group_name, []).append((name, command))
commands_grouped = commands_grouped.items()
commands_grouped.sort()
print 'Commands:'
for group, commands in commands_grouped:
if group:
print group + ':'
commands.sort()
for name, command in commands:
print ' %s %s' % (self.pad(name, length=longest),
command.summary)
#if command.description:
# print self.indent_block(command.description, 4)
print
| gpl-2.0 |
michaeldavidcarr/namebench | nb_third_party/dns/rdtypes/ANY/NSEC3.py | 248 | 6716 | # Copyright (C) 2004-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import base64
import cStringIO
import string
import struct
import dns.exception
import dns.rdata
import dns.rdatatype
b32_hex_to_normal = string.maketrans('0123456789ABCDEFGHIJKLMNOPQRSTUV',
'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567')
b32_normal_to_hex = string.maketrans('ABCDEFGHIJKLMNOPQRSTUVWXYZ234567',
'0123456789ABCDEFGHIJKLMNOPQRSTUV')
# hash algorithm constants
SHA1 = 1
# flag constants
OPTOUT = 1
class NSEC3(dns.rdata.Rdata):
"""NSEC3 record
@ivar algorithm: the hash algorithm number
@type algorithm: int
@ivar flags: the flags
@type flags: int
@ivar iterations: the number of iterations
@type iterations: int
@ivar salt: the salt
@type salt: string
@ivar next: the next name hash
@type next: string
@ivar windows: the windowed bitmap list
@type windows: list of (window number, string) tuples"""
__slots__ = ['algorithm', 'flags', 'iterations', 'salt', 'next', 'windows']
def __init__(self, rdclass, rdtype, algorithm, flags, iterations, salt,
next, windows):
super(NSEC3, self).__init__(rdclass, rdtype)
self.algorithm = algorithm
self.flags = flags
self.iterations = iterations
self.salt = salt
self.next = next
self.windows = windows
def to_text(self, origin=None, relativize=True, **kw):
next = base64.b32encode(self.next).translate(b32_normal_to_hex).lower()
if self.salt == '':
salt = '-'
else:
salt = self.salt.encode('hex-codec')
text = ''
for (window, bitmap) in self.windows:
bits = []
for i in xrange(0, len(bitmap)):
byte = ord(bitmap[i])
for j in xrange(0, 8):
if byte & (0x80 >> j):
bits.append(dns.rdatatype.to_text(window * 256 + \
i * 8 + j))
text += (' ' + ' '.join(bits))
return '%u %u %u %s %s%s' % (self.algorithm, self.flags, self.iterations,
salt, next, text)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
algorithm = tok.get_uint8()
flags = tok.get_uint8()
iterations = tok.get_uint16()
salt = tok.get_string()
if salt == '-':
salt = ''
else:
salt = salt.decode('hex-codec')
next = tok.get_string().upper().translate(b32_hex_to_normal)
next = base64.b32decode(next)
rdtypes = []
while 1:
token = tok.get().unescape()
if token.is_eol_or_eof():
break
nrdtype = dns.rdatatype.from_text(token.value)
if nrdtype == 0:
raise dns.exception.SyntaxError("NSEC3 with bit 0")
if nrdtype > 65535:
raise dns.exception.SyntaxError("NSEC3 with bit > 65535")
rdtypes.append(nrdtype)
rdtypes.sort()
window = 0
octets = 0
prior_rdtype = 0
bitmap = ['\0'] * 32
windows = []
for nrdtype in rdtypes:
if nrdtype == prior_rdtype:
continue
prior_rdtype = nrdtype
new_window = nrdtype // 256
if new_window != window:
windows.append((window, ''.join(bitmap[0:octets])))
bitmap = ['\0'] * 32
window = new_window
offset = nrdtype % 256
byte = offset // 8
bit = offset % 8
octets = byte + 1
bitmap[byte] = chr(ord(bitmap[byte]) | (0x80 >> bit))
windows.append((window, ''.join(bitmap[0:octets])))
return cls(rdclass, rdtype, algorithm, flags, iterations, salt, next, windows)
from_text = classmethod(from_text)
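# Worked example of the window/bitmap math above: for rdtype A (= 1),
# window = 1 // 256 = 0, offset = 1, byte = 0, bit = 1, so bitmap[0] is
# OR-ed with 0x80 >> 1 = 0x40 and windows ends up as [(0, '\x40')].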
def to_wire(self, file, compress = None, origin = None):
l = len(self.salt)
file.write(struct.pack("!BBHB", self.algorithm, self.flags,
self.iterations, l))
file.write(self.salt)
l = len(self.next)
file.write(struct.pack("!B", l))
file.write(self.next)
for (window, bitmap) in self.windows:
file.write(chr(window))
file.write(chr(len(bitmap)))
file.write(bitmap)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
(algorithm, flags, iterations, slen) = struct.unpack('!BBHB',
wire[current : current + 5])
current += 5
rdlen -= 5
salt = wire[current : current + slen]
current += slen
rdlen -= slen
(nlen, ) = struct.unpack('!B', wire[current])
current += 1
rdlen -= 1
next = wire[current : current + nlen]
current += nlen
rdlen -= nlen
windows = []
while rdlen > 0:
if rdlen < 3:
raise dns.exception.FormError("NSEC3 too short")
window = ord(wire[current])
octets = ord(wire[current + 1])
if octets == 0 or octets > 32:
raise dns.exception.FormError("bad NSEC3 octets")
current += 2
rdlen -= 2
if rdlen < octets:
raise dns.exception.FormError("bad NSEC3 bitmap length")
bitmap = wire[current : current + octets]
current += octets
rdlen -= octets
windows.append((window, bitmap))
return cls(rdclass, rdtype, algorithm, flags, iterations, salt, next, windows)
from_wire = classmethod(from_wire)
def _cmp(self, other):
b1 = cStringIO.StringIO()
self.to_wire(b1)
b2 = cStringIO.StringIO()
other.to_wire(b2)
return cmp(b1.getvalue(), b2.getvalue())
| apache-2.0 |
4Quant/tensorflow | tensorflow/python/ops/rnn.py | 1 | 21568 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RNN helpers for TensorFlow models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
def rnn(cell, inputs, initial_state=None, dtype=None,
sequence_length=None, scope=None):
"""Creates a recurrent neural network specified by RNNCell "cell".
The simplest form of RNN network generated is:
state = cell.zero_state(...)
outputs = []
for input_ in inputs:
output, state = cell(input_, state)
outputs.append(output)
return (outputs, state)
However, a few other options are available:
An initial state can be provided.
If the sequence_length vector is provided, dynamic calculation is performed.
This method of calculation does not compute the RNN steps past the maximum
sequence length of the minibatch (thus saving computational time),
and properly propagates the state at an example's sequence length
to the final state output.
The dynamic calculation performed is, at time t for batch row b,
(output, state)(b, t) =
(t >= sequence_length(b))
? (zeros(cell.output_size), states(b, sequence_length(b) - 1))
: cell(input(b, t), state(b, t - 1))
Args:
cell: An instance of RNNCell.
inputs: A length T list of inputs, each a tensor of shape
[batch_size, cell.input_size].
initial_state: (optional) An initial state for the RNN. This must be
a tensor of appropriate type and shape [batch_size x cell.state_size].
dtype: (optional) The data type for the initial state. Required if
initial_state is not provided.
sequence_length: Specifies the length of each sequence in inputs.
An int32 or int64 vector (tensor) size [batch_size]. Values in [0, T).
scope: VariableScope for the created subgraph; defaults to "RNN".
Returns:
A pair (outputs, state) where:
outputs is a length T list of outputs (one for each input)
state is the final state
Raises:
TypeError: If "cell" is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
if not isinstance(cell, rnn_cell.RNNCell):
raise TypeError("cell must be an instance of RNNCell")
if not isinstance(inputs, list):
raise TypeError("inputs must be a list")
if not inputs:
raise ValueError("inputs must not be empty")
outputs = []
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "RNN") as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
fixed_batch_size = inputs[0].get_shape().with_rank_at_least(1)[0]
if fixed_batch_size.value:
batch_size = fixed_batch_size.value
else:
batch_size = array_ops.shape(inputs[0])[0]
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError("If no initial_state is provided, dtype must be.")
state = cell.zero_state(batch_size, dtype)
if sequence_length is not None:
sequence_length = math_ops.to_int32(sequence_length)
if sequence_length is not None:  # Prepare variables
zero_output = array_ops.zeros(
array_ops.pack([batch_size, cell.output_size]), inputs[0].dtype)
zero_output.set_shape(
tensor_shape.TensorShape([fixed_batch_size.value, cell.output_size]))
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
for time, input_ in enumerate(inputs):
if time > 0: vs.get_variable_scope().reuse_variables()
# pylint: disable=cell-var-from-loop
call_cell = lambda: cell(input_, state)
# pylint: enable=cell-var-from-loop
if sequence_length is not None:
(output, state) = _rnn_step(
time, sequence_length, min_sequence_length, max_sequence_length,
zero_output, state, call_cell)
else:
(output, state) = call_cell()
outputs.append(output)
return (outputs, state)
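# Usage sketch for the list-based API above (shapes and names illustrative;
# assumes the surrounding TensorFlow modules of this era):
#
#   cell = rnn_cell.BasicLSTMCell(128)
#   inputs = [array_ops.placeholder(dtypes.float32, [batch_size, input_size])
#             for _ in range(num_steps)]
#   outputs, state = rnn(cell, inputs, dtype=dtypes.float32,
#                        sequence_length=seq_lens)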
def state_saving_rnn(cell, inputs, state_saver, state_name,
sequence_length=None, scope=None):
"""RNN that accepts a state saver for time-truncated RNN calculation.
Args:
cell: An instance of RNNCell.
inputs: A length T list of inputs, each a tensor of shape
[batch_size, cell.input_size].
state_saver: A state saver object with methods `state` and `save_state`.
state_name: The name to use with the state_saver.
sequence_length: (optional) An int32/int64 vector size [batch_size].
See the documentation for rnn() for more details about sequence_length.
scope: VariableScope for the created subgraph; defaults to "RNN".
Returns:
A pair (outputs, state) where:
outputs is a length T list of outputs (one for each input)
states is the final state
Raises:
TypeError: If "cell" is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
initial_state = state_saver.state(state_name)
(outputs, state) = rnn(cell, inputs, initial_state=initial_state,
sequence_length=sequence_length, scope=scope)
save_state = state_saver.save_state(state_name, state)
with ops.control_dependencies([save_state]):
outputs[-1] = array_ops.identity(outputs[-1])
return (outputs, state)
def _rnn_step(
time, sequence_length, min_sequence_length, max_sequence_length,
zero_output, state, call_cell):
"""Calculate one step of a dynamic RNN minibatch.
Returns an (output, state) pair conditioned on the sequence_lengths.
The pseudocode is something like:
if t >= max_sequence_length:
return (zero_output, state)
if t < min_sequence_length:
return call_cell()
# Selectively output zeros or output, old state or new state depending
# on if we've finished calculating each row.
new_output, new_state = call_cell()
final_output = np.vstack([
zero_output if time >= sequence_lengths[r] else new_output_r
for r, new_output_r in enumerate(new_output)
])
final_state = np.vstack([
state[r] if time >= sequence_lengths[r] else new_state_r
for r, new_state_r in enumerate(new_state)
])
return (final_output, final_state)
Args:
time: Python int, the current time step
sequence_length: int32 `Tensor` vector of size [batch_size]
min_sequence_length: int32 `Tensor` scalar, min of sequence_length
max_sequence_length: int32 `Tensor` scalar, max of sequence_length
zero_output: `Tensor` vector of shape [output_size]
state: `Tensor` matrix of shape [batch_size, state_size]
call_cell: lambda returning tuple of (new_output, new_state) where
new_output is a `Tensor` matrix of shape [batch_size, output_size]
new_state is a `Tensor` matrix of shape [batch_size, state_size]
Returns:
A tuple of (final_output, final_state) as given by the pseudocode above:
final_output is a `Tensor` matrix of shape [batch_size, output_size]
final_state is a `Tensor` matrix of shape [batch_size, state_size]
"""
# Step 1: determine whether we need to call_cell or not
empty_update = lambda: (zero_output, state)
state_shape = state.get_shape()
output, new_state = control_flow_ops.cond(
time < max_sequence_length, call_cell, empty_update)
# Step 2: determine whether we need to copy through state and/or outputs
existing_output_state = lambda: (output, new_state)
def copy_through():
# Use broadcasting select to determine which values should get
# the previous state & zero output, and which values should get
# a calculated state & output.
copy_cond = (time >= sequence_length)
return (math_ops.select(copy_cond, zero_output, output),
math_ops.select(copy_cond, state, new_state))
(output, state) = control_flow_ops.cond(
time < min_sequence_length, existing_output_state, copy_through)
output.set_shape(zero_output.get_shape())
state.set_shape(state_shape)
return (output, state)
def _reverse_seq(input_seq, lengths):
"""Reverse a list of Tensors up to specified lengths.
Args:
input_seq: Sequence of seq_len tensors of dimension (batch_size, depth)
lengths: A tensor of dimension batch_size, containing lengths for each
sequence in the batch. If "None" is specified, simply reverses
the list.
Returns:
time-reversed sequence
"""
if lengths is None:
return list(reversed(input_seq))
for input_ in input_seq:
input_.set_shape(input_.get_shape().with_rank(2))
# Join into (time, batch_size, depth)
s_joined = array_ops.pack(input_seq)
# Reverse along dimension 0
s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
# Split again into list
result = array_ops.unpack(s_reversed)
return result
def bidirectional_rnn(cell_fw, cell_bw, inputs,
initial_state_fw=None, initial_state_bw=None,
dtype=None, sequence_length=None, scope=None):
"""Creates a bidirectional recurrent neural network.
Similar to the unidirectional case above (rnn) but takes input and builds
independent forward and backward RNNs with the final forward and backward
outputs depth-concatenated, such that the output will have the format
[time][batch][cell_fw.output_size + cell_bw.output_size]. The input_size of
forward and backward cell must match. The initial state for both directions
is zero by default (but can be set optionally) and no intermediate states are
ever returned -- the network is fully unrolled for the given (passed in)
length(s) of the sequence(s) or completely unrolled if length(s) is not given.
Args:
cell_fw: An instance of RNNCell, to be used for forward direction.
cell_bw: An instance of RNNCell, to be used for backward direction.
inputs: A length T list of inputs, each a tensor of shape
[batch_size, cell.input_size].
initial_state_fw: (optional) An initial state for the forward RNN.
This must be a tensor of appropriate type and shape
[batch_size x cell.state_size].
initial_state_bw: (optional) Same as for initial_state_fw.
dtype: (optional) The data type for the initial state. Required if either
of the initial states are not provided.
sequence_length: (optional) An int32/int64 vector, size [batch_size],
containing the actual lengths for each of the sequences.
scope: VariableScope for the created subgraph; defaults to "BiRNN"
Returns:
A tuple (outputs, output_state_fw, output_state_bw) where:
outputs is a length T list of outputs (one for each input), which
are depth-concatenated forward and backward outputs
output_state_fw is the final state of the forward rnn
output_state_bw is the final state of the backward rnn
Raises:
TypeError: If "cell_fw" or "cell_bw" is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
if not isinstance(cell_fw, rnn_cell.RNNCell):
raise TypeError("cell_fw must be an instance of RNNCell")
if not isinstance(cell_bw, rnn_cell.RNNCell):
raise TypeError("cell_bw must be an instance of RNNCell")
if not isinstance(inputs, list):
raise TypeError("inputs must be a list")
if not inputs:
raise ValueError("inputs must not be empty")
name = scope or "BiRNN"
# Forward direction
with vs.variable_scope(name + "_FW") as fw_scope:
output_fw, output_state_fw = rnn(cell_fw, inputs, initial_state_fw, dtype,
sequence_length, scope=fw_scope)
# Backward direction
with vs.variable_scope(name + "_BW") as bw_scope:
tmp, output_state_bw = rnn(cell_bw, _reverse_seq(inputs, sequence_length),
initial_state_bw, dtype, sequence_length, scope=bw_scope)
output_bw = _reverse_seq(tmp, sequence_length)
# Concat each of the forward/backward outputs
outputs = [array_ops.concat(1, [fw, bw])
for fw, bw in zip(output_fw, output_bw)]
return (outputs, output_state_fw, output_state_bw)
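# Shape sketch: with cell_fw.output_size == cell_bw.output_size == 64, each
# element of `outputs` has shape [batch_size, 128], i.e. the forward output
# and the re-reversed backward output concatenated along the depth axis.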
def dynamic_rnn(cell, inputs, sequence_length, initial_state=None, dtype=None,
parallel_iterations=None, swap_memory=False, time_major=False,
scope=None):
"""Creates a recurrent neural network specified by RNNCell "cell".
This function is functionally identical to the function `rnn` above, but
performs fully dynamic unrolling of `inputs`.
Unlike `rnn`, the input `inputs` is not a Python list of `Tensors`. Instead,
it is a single `Tensor` where the maximum time is either the first or second
dimension (see the parameter `time_major`). The corresponding output is
a single `Tensor` having the same number of time steps and batch size.
The parameter `sequence_length` is required and dynamic calculation is
automatically performed.
Args:
cell: An instance of RNNCell.
inputs: The RNN inputs.
If time_major == False (default), this must be a tensor of shape:
`[batch_size, max_time, cell.input_size]`.
If time_major == True, this must be a tensor of shape:
`[max_time, batch_size, cell.input_size]`.
sequence_length: An int32/int64 vector (tensor) size [batch_size].
initial_state: (optional) An initial state for the RNN. This must be
a tensor of appropriate type and shape [batch_size x cell.state_size].
dtype: (optional) The data type for the initial state. Required if
initial_state is not provided.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Swap the tensors produced in forward inference but needed
for back prop from GPU to CPU.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using time_major = True is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the created subgraph; defaults to "RNN".
Returns:
A pair (outputs, state) where:
outputs: The RNN output `Tensor`.
If time_major == False (default), this will be a `Tensor` shaped:
`[batch_size, max_time, cell.output_size]`.
If time_major == True, this will be a `Tensor` shaped:
`[max_time, batch_size, cell.output_size]`.
state: The final state, shaped:
`[batch_size, cell.state_size]`.
Raises:
TypeError: If "cell" is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
if not isinstance(cell, rnn_cell.RNNCell):
raise TypeError("cell must be an instance of RNNCell")
# By default, time_major==False and inputs are batch-major: shaped
# [batch, time, depth]
# For internal calculations, we transpose to [time, batch, depth]
if not time_major:
inputs = array_ops.transpose(inputs, [1, 0, 2]) # (B,T,D) => (T,B,D)
parallel_iterations = parallel_iterations or 32
sequence_length = math_ops.to_int32(sequence_length)
sequence_length = array_ops.identity(sequence_length, name="sequence_length")
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "RNN") as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
input_shape = array_ops.shape(inputs)
batch_size = input_shape[1]
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError("If no initial_state is provided, dtype must be.")
state = cell.zero_state(batch_size, dtype)
def _assert_has_shape(x, shape):
x_shape = array_ops.shape(x)
packed_shape = array_ops.pack(shape)
return logging_ops.Assert(
math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)),
["Expected shape for Tensor %s is " % x.name,
packed_shape, " but saw shape: ", x_shape])
# Perform some shape validation
with ops.control_dependencies(
[_assert_has_shape(sequence_length, [batch_size])]):
sequence_length = array_ops.identity(sequence_length, name="CheckSeqLen")
(outputs, final_state) = _dynamic_rnn_loop(
cell, inputs, state, sequence_length,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
# Outputs of _dynamic_rnn_loop are always shaped [time, batch, depth].
# If we are performing batch-major calculations, transpose output back
# to shape [batch, time, depth]
if not time_major:
outputs = array_ops.transpose(outputs, [1, 0, 2]) # (T,B,D) => (B,T,D)
return (outputs, final_state)
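# Usage sketch (batch-major input; shapes illustrative):
#
#   cell = rnn_cell.GRUCell(64)
#   # inputs: float32 Tensor of shape [batch_size, max_time, input_size]
#   outputs, state = dynamic_rnn(cell, inputs, sequence_length=seq_lens,
#                                dtype=dtypes.float32)
#   # outputs: [batch_size, max_time, 64]; state: [batch_size, 64]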
def _dynamic_rnn_loop(cell, inputs, initial_state, sequence_length,
parallel_iterations, swap_memory):
"""Internal implementation of Dynamic RNN.
Args:
cell: An instance of RNNCell.
inputs: A `Tensor` of shape [time, batch_size, depth].
initial_state: A `Tensor` of shape [batch_size, depth].
sequence_length: An `int32` `Tensor` of shape [batch_size].
parallel_iterations: Positive Python int.
swap_memory: A Python boolean
Returns:
Tuple (final_outputs, final_state).
final_outputs:
A `Tensor` of shape [time, batch_size, depth]`.
final_state:
A `Tensor` of shape [batch_size, depth].
"""
state = initial_state
assert isinstance(parallel_iterations, int), "parallel_iterations must be int"
# Construct an initial output
input_shape = array_ops.shape(inputs)
(time_steps, batch_size, unused_depth) = array_ops.unpack(input_shape, 3)
inputs_got_shape = inputs.get_shape().with_rank(3)
(const_time_steps, const_batch_size, const_depth) = inputs_got_shape.as_list()
# Prepare dynamic conditional copying of state & output
zero_output = array_ops.zeros(
array_ops.pack([batch_size, cell.output_size]), inputs.dtype)
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
time = array_ops.constant(0, dtype=dtypes.int32, name="time")
with ops.op_scope([], "dynamic_rnn") as scope:
base_name = scope
output_ta = tensor_array_ops.TensorArray(
dtype=inputs.dtype, size=time_steps,
tensor_array_name=base_name + "output")
input_ta = tensor_array_ops.TensorArray(
dtype=inputs.dtype, size=time_steps,
tensor_array_name=base_name + "input")
input_ta = input_ta.unpack(inputs)
def _time_step(time, state, output_ta_t):
"""Take a time step of the dynamic RNN.
Args:
time: int32 scalar Tensor.
state: Vector.
output_ta_t: `TensorArray`, the output with existing flow.
Returns:
The tuple (time + 1, new_state, output_ta_t with updated flow).
"""
input_t = input_ta.read(time)
# Restore some shape information
input_t.set_shape([const_batch_size, const_depth])
(output, new_state) = _rnn_step(
time, sequence_length, min_sequence_length, max_sequence_length,
zero_output, state, lambda: cell(input_t, state))
output_ta_t = output_ta_t.write(time, output)
return (time + 1, new_state, output_ta_t)
(unused_final_time, final_state, output_final_ta) = control_flow_ops.While(
cond=lambda time, _1, _2: time < time_steps,
body=_time_step,
loop_vars=(time, state, output_ta),
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
final_outputs = output_final_ta.pack()
# Restore some shape information
final_outputs.set_shape([
const_time_steps, const_batch_size, cell.output_size])
return (final_outputs, final_state)
| apache-2.0 |
diagramsoftware/odoo | addons/account/wizard/account_fiscalyear_close.py | 222 | 15660 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_fiscalyear_close(osv.osv_memory):
"""
Closes the account fiscal year and generates opening entries for the new fiscal year
"""
_name = "account.fiscalyear.close"
_description = "Fiscalyear Close"
_columns = {
'fy_id': fields.many2one('account.fiscalyear', \
'Fiscal Year to close', required=True, help="Select a Fiscal year to close"),
'fy2_id': fields.many2one('account.fiscalyear', \
'New Fiscal Year', required=True),
'journal_id': fields.many2one('account.journal', 'Opening Entries Journal', domain="[('type','=','situation')]", required=True, help='The best practice here is to use a journal dedicated to contain the opening entries of all fiscal years. Note that you should define it with default debit/credit accounts, of type \'situation\' and with a centralized counterpart.'),
'period_id': fields.many2one('account.period', 'Opening Entries Period', required=True),
'report_name': fields.char('Name of new entries', required=True, help="Give name of the new entries"),
}
_defaults = {
'report_name': lambda self, cr, uid, context: _('End of Fiscal Year Entry'),
}
def data_save(self, cr, uid, ids, context=None):
"""
This function closes the account fiscal year and creates opening entries in the new fiscal year
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Account fiscalyear close state’s IDs
"""
def _reconcile_fy_closing(cr, uid, ids, context=None):
"""
This private function manually do the reconciliation on the account_move_line given as `ids´, and directly
through psql. It's necessary to do it this way because the usual `reconcile()´ function on account.move.line
object is really resource greedy (not supposed to work on reconciliation between thousands of records) and
it does a lot of different computation that are useless in this particular case.
"""
#check that the reconciliation concerns journal entries from only one company
cr.execute('select distinct(company_id) from account_move_line where id in %s',(tuple(ids),))
if len(cr.fetchall()) > 1:
raise osv.except_osv(_('Warning!'), _('The entries to reconcile should belong to the same company.'))
r_id = self.pool.get('account.move.reconcile').create(cr, uid, {'type': 'auto', 'opening_reconciliation': True})
cr.execute('update account_move_line set reconcile_id = %s where id in %s',(r_id, tuple(ids),))
# reconcile_ref depends on reconcile_id but was not recomputed
obj_acc_move_line._store_set_values(cr, uid, ids, ['reconcile_ref'], context=context)
obj_acc_move_line.invalidate_cache(cr, uid, ['reconcile_id'], ids, context=context)
return r_id
obj_acc_period = self.pool.get('account.period')
obj_acc_fiscalyear = self.pool.get('account.fiscalyear')
obj_acc_journal = self.pool.get('account.journal')
obj_acc_move = self.pool.get('account.move')
obj_acc_move_line = self.pool.get('account.move.line')
obj_acc_account = self.pool.get('account.account')
obj_acc_journal_period = self.pool.get('account.journal.period')
currency_obj = self.pool.get('res.currency')
data = self.browse(cr, uid, ids, context=context)
if context is None:
context = {}
fy_id = data[0].fy_id.id
cr.execute("SELECT id FROM account_period WHERE date_stop < (SELECT date_start FROM account_fiscalyear WHERE id = %s)", (str(data[0].fy2_id.id),))
fy_period_set = ','.join(map(lambda id: str(id[0]), cr.fetchall()))
cr.execute("SELECT id FROM account_period WHERE date_start > (SELECT date_stop FROM account_fiscalyear WHERE id = %s)", (str(fy_id),))
fy2_period_set = ','.join(map(lambda id: str(id[0]), cr.fetchall()))
if not fy_period_set or not fy2_period_set:
raise osv.except_osv(_('User Error!'), _('The periods to generate opening entries cannot be found.'))
period = obj_acc_period.browse(cr, uid, data[0].period_id.id, context=context)
new_fyear = obj_acc_fiscalyear.browse(cr, uid, data[0].fy2_id.id, context=context)
old_fyear = obj_acc_fiscalyear.browse(cr, uid, fy_id, context=context)
new_journal = data[0].journal_id.id
new_journal = obj_acc_journal.browse(cr, uid, new_journal, context=context)
company_id = new_journal.company_id.id
if not new_journal.default_credit_account_id or not new_journal.default_debit_account_id:
raise osv.except_osv(_('User Error!'),
_('The journal must have default credit and debit account.'))
if (not new_journal.centralisation) or new_journal.entry_posted:
raise osv.except_osv(_('User Error!'),
_('The journal must have centralized counterpart without the Skipping draft state option checked.'))
#delete existing move and move lines if any
move_ids = obj_acc_move.search(cr, uid, [
('journal_id', '=', new_journal.id), ('period_id', '=', period.id)])
if move_ids:
move_line_ids = obj_acc_move_line.search(cr, uid, [('move_id', 'in', move_ids)])
obj_acc_move_line._remove_move_reconcile(cr, uid, move_line_ids, opening_reconciliation=True, context=context)
obj_acc_move_line.unlink(cr, uid, move_line_ids, context=context)
obj_acc_move.unlink(cr, uid, move_ids, context=context)
cr.execute("SELECT id FROM account_fiscalyear WHERE date_stop < %s", (str(new_fyear.date_start),))
result = cr.dictfetchall()
fy_ids = [x['id'] for x in result]
query_line = obj_acc_move_line._query_get(cr, uid,
obj='account_move_line', context={'fiscalyear': fy_ids})
#create the opening move
vals = {
'name': '/',
'ref': '',
'period_id': period.id,
'date': period.date_start,
'journal_id': new_journal.id,
}
move_id = obj_acc_move.create(cr, uid, vals, context=context)
#1. report of the accounts with deferral method == 'unreconciled'
cr.execute('''
SELECT a.id
FROM account_account a
LEFT JOIN account_account_type t ON (a.user_type = t.id)
WHERE a.active
AND a.type not in ('view', 'consolidation')
AND a.company_id = %s
AND t.close_method = %s''', (company_id, 'unreconciled', ))
account_ids = map(lambda x: x[0], cr.fetchall())
if account_ids:
cr.execute('''
INSERT INTO account_move_line (
name, create_uid, create_date, write_uid, write_date,
statement_id, journal_id, currency_id, date_maturity,
partner_id, blocked, credit, state, debit,
ref, account_id, period_id, date, move_id, amount_currency,
quantity, product_id, company_id)
(SELECT name, create_uid, create_date, write_uid, write_date,
statement_id, %s,currency_id, date_maturity, partner_id,
blocked, credit, 'draft', debit, ref, account_id,
%s, (%s) AS date, %s, amount_currency, quantity, product_id, company_id
FROM account_move_line
WHERE account_id IN %s
AND ''' + query_line + '''
AND reconcile_id IS NULL)''', (new_journal.id, period.id, period.date_start, move_id, tuple(account_ids),))
#We also have to consider all move_lines that were reconciled
#in another fiscal year, and report them too
cr.execute('''
INSERT INTO account_move_line (
name, create_uid, create_date, write_uid, write_date,
statement_id, journal_id, currency_id, date_maturity,
partner_id, blocked, credit, state, debit,
ref, account_id, period_id, date, move_id, amount_currency,
quantity, product_id, company_id)
(SELECT
b.name, b.create_uid, b.create_date, b.write_uid, b.write_date,
b.statement_id, %s, b.currency_id, b.date_maturity,
b.partner_id, b.blocked, b.credit, 'draft', b.debit,
b.ref, b.account_id, %s, (%s) AS date, %s, b.amount_currency,
b.quantity, b.product_id, b.company_id
FROM account_move_line b
WHERE b.account_id IN %s
AND b.reconcile_id IS NOT NULL
AND b.period_id IN ('''+fy_period_set+''')
AND b.reconcile_id IN (SELECT DISTINCT(reconcile_id)
FROM account_move_line a
WHERE a.period_id IN ('''+fy2_period_set+''')))''', (new_journal.id, period.id, period.date_start, move_id, tuple(account_ids),))
self.invalidate_cache(cr, uid, context=context)
#2. report of the accounts with deferral method == 'detail'
cr.execute('''
SELECT a.id
FROM account_account a
LEFT JOIN account_account_type t ON (a.user_type = t.id)
WHERE a.active
AND a.type not in ('view', 'consolidation')
AND a.company_id = %s
AND t.close_method = %s''', (company_id, 'detail', ))
account_ids = map(lambda x: x[0], cr.fetchall())
if account_ids:
cr.execute('''
INSERT INTO account_move_line (
name, create_uid, create_date, write_uid, write_date,
statement_id, journal_id, currency_id, date_maturity,
partner_id, blocked, credit, state, debit,
ref, account_id, period_id, date, move_id, amount_currency,
quantity, product_id, company_id)
(SELECT name, create_uid, create_date, write_uid, write_date,
statement_id, %s,currency_id, date_maturity, partner_id,
blocked, credit, 'draft', debit, ref, account_id,
%s, (%s) AS date, %s, amount_currency, quantity, product_id, company_id
FROM account_move_line
WHERE account_id IN %s
AND ''' + query_line + ''')
''', (new_journal.id, period.id, period.date_start, move_id, tuple(account_ids),))
self.invalidate_cache(cr, uid, context=context)
#3. report of the accounts with deferral method == 'balance'
cr.execute('''
SELECT a.id
FROM account_account a
LEFT JOIN account_account_type t ON (a.user_type = t.id)
WHERE a.active
AND a.type not in ('view', 'consolidation')
AND a.company_id = %s
AND t.close_method = %s''', (company_id, 'balance', ))
account_ids = map(lambda x: x[0], cr.fetchall())
query_1st_part = """
INSERT INTO account_move_line (
debit, credit, name, date, move_id, journal_id, period_id,
account_id, currency_id, amount_currency, company_id, state) VALUES
"""
query_2nd_part = ""
query_2nd_part_args = []
for account in obj_acc_account.browse(cr, uid, account_ids, context={'fiscalyear': fy_id}):
company_currency_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.currency_id
if not currency_obj.is_zero(cr, uid, company_currency_id, abs(account.balance)):
if query_2nd_part:
query_2nd_part += ','
query_2nd_part += "(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
query_2nd_part_args += (account.balance > 0 and account.balance or 0.0,
account.balance < 0 and -account.balance or 0.0,
data[0].report_name,
period.date_start,
move_id,
new_journal.id,
period.id,
account.id,
account.currency_id and account.currency_id.id or None,
account.foreign_balance if account.currency_id else 0.0,
account.company_id.id,
'draft')
if query_2nd_part:
cr.execute(query_1st_part + query_2nd_part, tuple(query_2nd_part_args))
self.invalidate_cache(cr, uid, context=context)
#validate and centralize the opening move
obj_acc_move.validate(cr, uid, [move_id], context=context)
#reconcile all the move.line of the opening move
ids = obj_acc_move_line.search(cr, uid, [('journal_id', '=', new_journal.id),
('period_id.fiscalyear_id','=',new_fyear.id)])
if ids:
reconcile_id = _reconcile_fy_closing(cr, uid, ids, context=context)
#set the creation date of the reconciliation to the first day of the new fiscal year, in order to have correct figures in the aged trial balance
self.pool.get('account.move.reconcile').write(cr, uid, [reconcile_id], {'create_date': new_fyear.date_start}, context=context)
#create the journal.period object and link it to the old fiscalyear
new_period = data[0].period_id.id
ids = obj_acc_journal_period.search(cr, uid, [('journal_id', '=', new_journal.id), ('period_id', '=', new_period)])
if not ids:
ids = [obj_acc_journal_period.create(cr, uid, {
'name': (new_journal.name or '') + ':' + (period.code or ''),
'journal_id': new_journal.id,
'period_id': period.id
})]
cr.execute('UPDATE account_fiscalyear ' \
'SET end_journal_period_id = %s ' \
'WHERE id = %s', (ids[0], old_fyear.id))
obj_acc_fiscalyear.invalidate_cache(cr, uid, ['end_journal_period_id'], [old_fyear.id], context=context)
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sknepneklab/SAMoS | utils/make_circular_patch.py | 1 | 3349 | # ***************************************************************************
# *
# * Copyright (C) 2013-2016 University of Dundee
# * All rights reserved.
# *
# * This file is part of SAMoS (Soft Active Matter on Surfaces) program.
# *
# * SAMoS is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2 of the License, or
# * (at your option) any later version.
# *
# * SAMoS is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program. If not, see <http://www.gnu.org/licenses/>.
# *
# *****************************************************************************
# Utility code for generating the initial configuration for cell simulations.
# This code places N cells in a patch of radius R, keeping in mind that the
# minimum distance between two cells should be greater than a certain value.
import sys
import argparse
import numpy as np
from random import uniform
from datetime import *
import math as m
from CellList2D import *
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--output", type=str, default='patch.dat', help="output file name")
parser.add_argument("-R", "--radius", type=float, default=20.0, help="patch radius")
parser.add_argument("-N", "--num", type=int, default=100, help="number of particles")
parser.add_argument("-m", "--min_dist", type=float, default=1.5, help="minium distance between particles")
parser.add_argument("-A", "--A0", type=float, default=m.pi, help="native cell area")
args = parser.parse_args()
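# Example invocation (values are illustrative):
#   python make_circular_patch.py -R 20 -N 100 -m 1.5 -o patch.dat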
print
print "\tSoft Actve Matter on Surfaces (SAMoS)"
print "\tGenerates a circial cell patch"
print
print "\tRastko Sknepnek"
print "\tUniversity of Dundee"
print "\t(c) 2015"
print "\t----------------------------------------------"
print
print "\tOutput files : ", args.output
print "\tPatch radius : ", args.radius
print "\tNumber of cells : ", args.num
print "\tMinimum distance between cells : ", args.min_dist
print
start = datetime.now()
R = args.radius
cl = CellList2D([2.2*R,2.2*R],2*args.min_dist)
particles = []
i = 0
while i < args.num:
x, y = uniform(-R,R), uniform(-R,R)
if (x**2 + y**2 < R**2):
cid = cl.get_cell_idx((x,y))
can_add = True
for nb in cl.cell_list[cid].neighbors:
for idx in cl.cell_list[nb].indices:
xi, yi = particles[idx]
dx, dy = x-xi, y-yi
if dx*dx + dy*dy < args.min_dist**2:
can_add = False
break
if not can_add:
break
if can_add:
print "Successfully added particle : ", i
particles.append((x,y))
cl.add_particle((x,y),i)
i += 1
out = open(args.output,'w')
out.write('keys: id x y nx ny nvx nvy nvz area\n')
for i in range(len(particles)):
x,y = particles[i]
phi = uniform(0,2*m.pi)
out.write('%4d %f %f %f %f %f %f %f %f\n' % (i,x,y, m.cos(phi),m.sin(phi), 0, 0, 1.0, args.A0))
out.close()
end = datetime.now()
total = end - start
print
print " *** Completed in ", total.total_seconds(), " seconds *** "
print | gpl-3.0 |
woobe/h2o | py/test_import2.py | 2 | 4794 | import unittest, time, sys, os
# not needed, but in case you move it down to subdir
sys.path.extend(['.','..'])
import h2o_cmd
import h2o
import h2o_browse as h2b
import h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.build_cloud(node_count=1,java_heap_GB=1)
h2b.browseTheCloud()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def notest_A_Basic(self):
# put file and parse, starting from the current wd
h2i.import_parse(path="testdir_multi_jvm/syn_sphere_gen.csv", schema='put')
def notest_B_Basic(self):
# put file and parse, will walk path looking upwards till it finds 'my-bucket' directory.
# Getting the absolute path for mydata/file.csv starts there
# default bucket name is 'home-0xdiag-datasets' (we can change that eventually)
h2i.import_parse(path='dir2/syn_sphere_gen2.csv', bucket='my-bucket2', schema='put')
def notest_C_Basic(self):
# this will do an import folder and parse. schema='local' is default. doesn't need to be specified
# I guess this will be relative to current wd
## if os env variable H2O_BUCKETS_ROOT is set, it will start looking there for bucket, then path
## that covers the case where "walking upward" is not sufficient for where you but the bucket (locally)
os.environ['H2O_BUCKETS_ROOT'] = '/home'
h2i.import_parse(path='dir3/syn_sphere_gen3.csv', bucket='my-bucket3', schema='local')
del os.environ['H2O_BUCKETS_ROOT']
def notest_D_Basic(self):
# this can be an absolute path for the local system
h2i.import_parse(path='/home/my-bucket2/dir2/syn_sphere_gen2.csv', schema='local')
def test_E_Basic(self):
# what happens here..abs path plus bucket. error?
h2i.import_parse(path='/dir3/syn_sphere_gen3.csv', bucket='my-bucket3', schema='local')
def test_F_Basic(self):
# causes exception
# h2i.import_parse(path="testdir_multi_jvm/syn_[1-2].csv", schema='put')
# no exception
h2i.import_parse(path="testdir_multi_jvm/syn[1-2].csv", schema='local')
## for specifying header_from_file...
## As long as header.csv was in the same directory (mydata), it will have been imported correctly.
## if not, another import_only step can be done (import itself does an import_only() step and a parse() step)
def test_G_Basic(self):
# defaults to import folder (schema='local')
h2i.import_parse(path="testdir_multi_jvm/syn[1-2].csv")
def test_H_Basic(self):
# maybe best to extract the key from an import first?
# this isn't used much, maybe we don't care about this
h2i.import_only(path="testdir_multi_jvm/syn_test/syn_header.csv")
headerKey = h2i.find_key('syn_header.csv')
# comma 44 is separator
h2i.import_parse(path="testdir_multi_jvm/syn_test/syn[1-2].csv", header=1, header_from_file=headerKey, separator=44)
# symbolic links work
# ln -s /home/0xdiag/datasets home-0xdiag-datasets
# lrwxrwxrwx 1 kevin kevin 21 Aug 26 22:05 home-0xdiag-datasets -> /home/0xdiag/datasets
h2i.import_parse(path="standard/covtype.data", bucket="home-0xdiag-datasets")
## This will get it from import s3.
#import(path=junkdir/junk.csv, bucket="home-0xdiag-datasets", schema="s3")
#
## This will get it from import hdfs with s3n. the hdfs_name_node and hdfs_version for s3
# will have been passed at build_cloud, either from the test, or the <config>.json
#import(path=junkdir/junk.csv, bucket="home-0xdiag-datasets", schema="s3n")
#
## this will get it from hdfs. the hdfs_name_node and hdfs_version for hdfs will
# have been passed at build_cloud, either from the test, or the <config>.json.
## It defaults to the local 192.168.1.176 cdh3 hdfs
## I guess -hdfs_root behavior works, but shouldn't be necessary (full path will be sent to h2o)
#import(path=junkdir/junk.csv, bucket="home-0xdiag-datasets", schema="hdfs")
#
## separator, exclude params can be passed for the parse
#import(path=junkdir/junk.csv, bucket="home-0xdiag-datasets", schema="hdfs", separator=11)
#
#H2O_BUCKETS_ROOT is the only env variable that affects behavior
#there are two <config.json> node variables set during build_cloud that will
# redirect schema='local' to schema='s3n'
# node.redirect_import_folder_to_s3_path
# node.redirect_import_folder_to_s3n_path
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
Sorsly/subtle | google-cloud-sdk/lib/third_party/setuptools/command/build_clib.py | 314 | 4484 | import distutils.command.build_clib as orig
from distutils.errors import DistutilsSetupError
from distutils import log
from setuptools.dep_util import newer_pairwise_group
class build_clib(orig.build_clib):
"""
Override the default build_clib behaviour to do the following:
1. Implement a rudimentary timestamp-based dependency system
so 'compile()' doesn't run every time.
2. Add more keys to the 'build_info' dictionary:
* obj_deps - specify dependencies for each object compiled.
this should be a dictionary mapping a key
with the source filename to a list of
dependencies. Use an empty string for global
dependencies.
* cflags - specify a list of additional flags to pass to
the compiler.
"""
def build_libraries(self, libraries):
for (lib_name, build_info) in libraries:
sources = build_info.get('sources')
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError(
"in 'libraries' option (library '%s'), "
"'sources' must be present and must be "
"a list of source filenames" % lib_name)
sources = list(sources)
log.info("building '%s' library", lib_name)
# Make sure everything is the correct type.
# obj_deps should be a dictionary of keys as sources
# and a list/tuple of files that are its dependencies.
obj_deps = build_info.get('obj_deps', dict())
if not isinstance(obj_deps, dict):
raise DistutilsSetupError(
"in 'libraries' option (library '%s'), "
"'obj_deps' must be a dictionary of "
"type 'source: list'" % lib_name)
dependencies = []
# Get the global dependencies that are specified by the '' key.
# These will go into every source's dependency list.
global_deps = obj_deps.get('', list())
if not isinstance(global_deps, (list, tuple)):
raise DistutilsSetupError(
"in 'libraries' option (library '%s'), "
"'obj_deps' must be a dictionary of "
"type 'source: list'" % lib_name)
# Build the list to be used by newer_pairwise_group
# each source will be auto-added to its dependencies.
for source in sources:
src_deps = [source]
src_deps.extend(global_deps)
extra_deps = obj_deps.get(source, list())
if not isinstance(extra_deps, (list, tuple)):
raise DistutilsSetupError(
"in 'libraries' option (library '%s'), "
"'obj_deps' must be a dictionary of "
"type 'source: list'" % lib_name)
src_deps.extend(extra_deps)
dependencies.append(src_deps)
expected_objects = self.compiler.object_filenames(
sources,
output_dir=self.build_temp
)
if newer_pairwise_group(dependencies, expected_objects) != ([], []):
# First, compile the source code to object files in the library
# directory. (This should probably change to putting object
# files in a temporary build directory.)
macros = build_info.get('macros')
include_dirs = build_info.get('include_dirs')
cflags = build_info.get('cflags')
objects = self.compiler.compile(
sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
extra_postargs=cflags,
debug=self.debug
)
# Now "link" the object files together into a static library.
# (On Unix at least, this isn't really linking -- it just
# builds an archive. Whatever.)
self.compiler.create_static_lib(
expected_objects,
lib_name,
output_dir=self.build_clib,
debug=self.debug
)
| mit |
chrisjrn/registrasion | registrasion/tests/controller_helpers.py | 1 | 2034 | from registrasion.controllers.cart import CartController
from registrasion.controllers.credit_note import CreditNoteController
from registrasion.controllers.invoice import InvoiceController
from registrasion.models import commerce
from django.core.exceptions import ObjectDoesNotExist
class TestingCartController(CartController):
def set_quantity(self, product, quantity, batched=False):
''' Sets the _quantity_ of the given _product_ in the cart to the given
_quantity_. '''
self.set_quantities(((product, quantity),))
def add_to_cart(self, product, quantity):
''' Adds _quantity_ of the given _product_ to the cart. Raises
ValidationError if constraints are violated.'''
try:
product_item = commerce.ProductItem.objects.get(
cart=self.cart,
product=product)
old_quantity = product_item.quantity
except ObjectDoesNotExist:
old_quantity = 0
self.set_quantity(product, old_quantity + quantity)
def next_cart(self):
if self.cart.status == commerce.Cart.STATUS_ACTIVE:
self.cart.status = commerce.Cart.STATUS_PAID
self.cart.save()
class TestingInvoiceController(InvoiceController):
def pay(self, reference, amount, pre_validate=True):
''' Testing method for simulating an invoice payment of the given
amount. '''
if pre_validate:
# Manual payments don't pre-validate; we should test that things
# still work if we do silly things.
self.validate_allowed_to_pay()
# Adds a payment.
commerce.PaymentBase.objects.create(
invoice=self.invoice,
reference=reference,
amount=amount,
)
self.update_status()
class TestingCreditNoteController(CreditNoteController):
def refund(self):
commerce.CreditNoteRefund.objects.create(
parent=self.credit_note,
reference="Whoops."
)
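# A hedged sketch of how these helpers are typically driven in tests
# (for_user/for_cart are assumed constructors inherited from the production
# controllers; the user and product fixtures are placeholders):
#
# cart = TestingCartController.for_user(user)
# cart.add_to_cart(product, 1)
# invoice = TestingInvoiceController.for_cart(cart.cart)
# invoice.pay("test payment", invoice.invoice.value)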
| apache-2.0 |
shfengcj/pyminer | pyminer_setting.py | 1 | 1142 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 23 06:53:32 2015
@author: chaojun
"""
from pyminer_cos_model import lcdm
from pyminer_residual import JLAresiCal, CMBresiCal, BAOresiCal
# General settings
divMax = 15 # for romberg integral
ogh2 = 2.469e-5
JLA_DIR = '/Users/chaojun/Documents/Research/2015/grb/pycode/data/jla'
# Cosmological model
model = lcdm(divmax = divMax)
# Data setting
use_sn_data = True
use_cmb_data = True
use_bao_data = True
resobj=[]
if use_sn_data : resobj.append( JLAresiCal(cosModel = model, DATA_DIR_JLA = JLA_DIR) )
if use_cmb_data: resobj.append( CMBresiCal(cosModel = model) )
if use_bao_data: resobj.append( BAOresiCal(cosModel = model) )
# Residual function
def residual(p, resobj = resobj, fjac=None):
import numpy as np
res = np.array([])
for obj in resobj:
tmp = obj.residual(p)
res = np.append(res, tmp)
status = 0
return [status, res]
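# Hedged usage sketch: residual() follows the mpfit convention of returning
# [status, residual_vector], so a least-squares driver can minimize it. The
# parameter vector below is a placeholder, not a fitted result:
#
# import numpy as np
# p0 = np.array([0.3, 70.0])  # hypothetical starting point for the model
# status, res = residual(p0)
# chi2 = np.sum(res ** 2)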
# some other functions
def clear_env():
# iterate over a snapshot of the keys; popping while iterating over the
# live dict view would raise a RuntimeError on Python 3
for key in list(globals().keys()):
if not key.startswith("__"):
globals().pop(key)
| gpl-2.0 |
persandstrom/home-assistant | homeassistant/components/sensor/version.py | 3 | 1402 | """
Support for displaying the current version of Home Assistant.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.version/
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import __version__, CONF_NAME
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Current Version"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
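# For reference, a hedged example of the configuration this schema accepts,
# shown as the equivalent Python dict (in practice it comes from YAML):
#
# PLATFORM_SCHEMA({
#     'platform': 'version',
#     'name': 'HA Version',  # optional; defaults to DEFAULT_NAME
# })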
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up the Version sensor platform."""
name = config.get(CONF_NAME)
async_add_entities([VersionSensor(name)])
class VersionSensor(Entity):
"""Representation of a Home Assistant version sensor."""
def __init__(self, name):
"""Initialize the Version sensor."""
self._name = name
self._state = __version__
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def state(self):
"""Return the state of the sensor."""
return self._state
| apache-2.0 |
proxysh/Safejumper-for-Desktop | buildlinux/env64/lib/python2.7/site-packages/Crypto/SelfTest/Util/test_asn1.py | 113 | 10239 | # -*- coding: utf-8 -*-
#
# SelfTest/Util/test_asn.py: Self-test for the Crypto.Util.asn1 module
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-tests for Crypto.Util.asn1"""
__revision__ = "$Id$"
import unittest
import sys
from Crypto.Util.py3compat import *
from Crypto.Util.asn1 import DerSequence, DerObject
class DerObjectTests(unittest.TestCase):
def testObjEncode1(self):
# No payload
der = DerObject(b('\x33'))
self.assertEquals(der.encode(), b('\x33\x00'))
# Small payload
der.payload = b('\x45')
self.assertEquals(der.encode(), b('\x33\x01\x45'))
# Invariant
self.assertEquals(der.encode(), b('\x33\x01\x45'))
# Initialize with numerical tag
der = DerObject(b(0x33))
der.payload = b('\x45')
self.assertEquals(der.encode(), b('\x33\x01\x45'))
def testObjEncode2(self):
# Known types
der = DerObject('SEQUENCE')
self.assertEquals(der.encode(), b('\x30\x00'))
der = DerObject('BIT STRING')
self.assertEquals(der.encode(), b('\x03\x00'))
def testObjEncode3(self):
# Long payload
der = DerObject(b('\x34'))
der.payload = b("0")*128
self.assertEquals(der.encode(), b('\x34\x81\x80' + "0"*128))
def testObjDecode1(self):
# Decode short payload
der = DerObject()
der.decode(b('\x20\x02\x01\x02'))
self.assertEquals(der.payload, b("\x01\x02"))
self.assertEquals(der.typeTag, 0x20)
def testObjDecode2(self):
# Decode short payload
der = DerObject()
der.decode(b('\x22\x81\x80' + "1"*128))
self.assertEquals(der.payload, b("1")*128)
self.assertEquals(der.typeTag, 0x22)
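# For orientation: DER is a tag-length-value encoding, so a byte string like
# b('0\x03\x02\x01\x00') used below reads as tag 0x30 (SEQUENCE, the '0'
# character), length 3, and a payload of one INTEGER (tag 0x02, length 1,
# value 0).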
class DerSequenceTests(unittest.TestCase):
def testEncode1(self):
# Empty sequence
der = DerSequence()
self.assertEquals(der.encode(), b('0\x00'))
self.failIf(der.hasOnlyInts())
# One single-byte integer (zero)
der.append(0)
self.assertEquals(der.encode(), b('0\x03\x02\x01\x00'))
self.failUnless(der.hasOnlyInts())
# Invariant
self.assertEquals(der.encode(), b('0\x03\x02\x01\x00'))
def testEncode2(self):
# One single-byte integer (non-zero)
der = DerSequence()
der.append(127)
self.assertEquals(der.encode(), b('0\x03\x02\x01\x7f'))
# Indexing
der[0] = 1
self.assertEquals(len(der),1)
self.assertEquals(der[0],1)
self.assertEquals(der[-1],1)
self.assertEquals(der.encode(), b('0\x03\x02\x01\x01'))
#
der[:] = [1]
self.assertEquals(len(der),1)
self.assertEquals(der[0],1)
self.assertEquals(der.encode(), b('0\x03\x02\x01\x01'))
def testEncode3(self):
# One multi-byte integer (non-zero)
der = DerSequence()
der.append(0x180L)
self.assertEquals(der.encode(), b('0\x04\x02\x02\x01\x80'))
def testEncode4(self):
# One very long integer
der = DerSequence()
der.append(2**2048)
self.assertEquals(der.encode(), b('0\x82\x01\x05')+
b('\x02\x82\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00'))
def testEncode5(self):
# One single-byte integer (looks negative)
der = DerSequence()
der.append(0xFFL)
self.assertEquals(der.encode(), b('0\x04\x02\x02\x00\xff'))
def testEncode6(self):
# Two integers
der = DerSequence()
der.append(0x180L)
der.append(0xFFL)
self.assertEquals(der.encode(), b('0\x08\x02\x02\x01\x80\x02\x02\x00\xff'))
self.failUnless(der.hasOnlyInts())
#
der.append(0x01)
der[1:] = [9,8]
self.assertEquals(len(der),3)
self.assertEqual(der[1:],[9,8])
self.assertEqual(der[1:-1],[9])
self.assertEquals(der.encode(), b('0\x0A\x02\x02\x01\x80\x02\x01\x09\x02\x01\x08'))
def testEncode7(self):
# One integer and another type (no matter what it is)
der = DerSequence()
der.append(0x180L)
der.append(b('\x00\x02\x00\x00'))
self.assertEquals(der.encode(), b('0\x08\x02\x02\x01\x80\x00\x02\x00\x00'))
self.failIf(der.hasOnlyInts())
####
def testDecode1(self):
# Empty sequence
der = DerSequence()
der.decode(b('0\x00'))
self.assertEquals(len(der),0)
# One single-byte integer (zero)
der.decode(b('0\x03\x02\x01\x00'))
self.assertEquals(len(der),1)
self.assertEquals(der[0],0)
# Invariant
der.decode(b('0\x03\x02\x01\x00'))
self.assertEquals(len(der),1)
self.assertEquals(der[0],0)
def testDecode2(self):
# One single-byte integer (non-zero)
der = DerSequence()
der.decode(b('0\x03\x02\x01\x7f'))
self.assertEquals(len(der),1)
self.assertEquals(der[0],127)
def testDecode3(self):
# One multi-byte integer (non-zero)
der = DerSequence()
der.decode(b('0\x04\x02\x02\x01\x80'))
self.assertEquals(len(der),1)
self.assertEquals(der[0],0x180L)
def testDecode4(self):
# One very long integer
der = DerSequence()
der.decode(b('0\x82\x01\x05')+
b('\x02\x82\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00'))
self.assertEquals(len(der),1)
self.assertEquals(der[0],2**2048)
def testDecode5(self):
# One single-byte integer (looks negative)
der = DerSequence()
der.decode(b('0\x04\x02\x02\x00\xff'))
self.assertEquals(len(der),1)
self.assertEquals(der[0],0xFFL)
def testDecode6(self):
# Two integers
der = DerSequence()
der.decode(b('0\x08\x02\x02\x01\x80\x02\x02\x00\xff'))
self.assertEquals(len(der),2)
self.assertEquals(der[0],0x180L)
self.assertEquals(der[1],0xFFL)
def testDecode7(self):
# One integer and 2 other types
der = DerSequence()
der.decode(b('0\x0A\x02\x02\x01\x80\x24\x02\xb6\x63\x12\x00'))
self.assertEquals(len(der),3)
self.assertEquals(der[0],0x180L)
self.assertEquals(der[1],b('\x24\x02\xb6\x63'))
self.assertEquals(der[2],b('\x12\x00'))
def testDecode8(self):
# Only 2 other types
der = DerSequence()
der.decode(b('0\x06\x24\x02\xb6\x63\x12\x00'))
self.assertEquals(len(der),2)
self.assertEquals(der[0],b('\x24\x02\xb6\x63'))
self.assertEquals(der[1],b('\x12\x00'))
def testErrDecode1(self):
# Not a sequence
der = DerSequence()
self.assertRaises(ValueError, der.decode, b(''))
self.assertRaises(ValueError, der.decode, b('\x00'))
self.assertRaises(ValueError, der.decode, b('\x30'))
def testErrDecode2(self):
# Wrong payload type
der = DerSequence()
self.assertRaises(ValueError, der.decode, b('\x30\x00\x00'), True)
def testErrDecode3(self):
# Wrong length format
der = DerSequence()
self.assertRaises(ValueError, der.decode, b('\x30\x04\x02\x01\x01\x00'))
self.assertRaises(ValueError, der.decode, b('\x30\x81\x03\x02\x01\x01'))
self.assertRaises(ValueError, der.decode, b('\x30\x04\x02\x81\x01\x01'))
def testErrDecode4(self):
# Wrong integer format
der = DerSequence()
# Multi-byte encoding for zero
#self.assertRaises(ValueError, der.decode, '\x30\x04\x02\x02\x00\x00')
# Negative integer
self.assertRaises(ValueError, der.decode, b('\x30\x04\x02\x01\xFF'))
def get_tests(config={}):
from Crypto.SelfTest.st_common import list_test_cases
listTests = []
listTests += list_test_cases(DerObjectTests)
listTests += list_test_cases(DerSequenceTests)
return listTests
if __name__ == '__main__':
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
| gpl-2.0 |
charris/numpy | numpy/ma/tests/test_deprecations.py | 17 | 2258 | """Test deprecation and future warnings.
"""
import numpy as np
from numpy.testing import assert_warns
from numpy.ma.testutils import assert_equal
from numpy.ma.core import MaskedArrayFutureWarning
class TestArgsort:
""" gh-8701 """
def _test_base(self, argsort, cls):
arr_0d = np.array(1).view(cls)
argsort(arr_0d)
arr_1d = np.array([1, 2, 3]).view(cls)
argsort(arr_1d)
# argsort has a bad default for >1d arrays
arr_2d = np.array([[1, 2], [3, 4]]).view(cls)
result = assert_warns(
np.ma.core.MaskedArrayFutureWarning, argsort, arr_2d)
assert_equal(result, argsort(arr_2d, axis=None))
# should be no warnings for explicitly specifying it
argsort(arr_2d, axis=None)
argsort(arr_2d, axis=-1)
def test_function_ndarray(self):
return self._test_base(np.ma.argsort, np.ndarray)
def test_function_maskedarray(self):
return self._test_base(np.ma.argsort, np.ma.MaskedArray)
def test_method(self):
return self._test_base(np.ma.MaskedArray.argsort, np.ma.MaskedArray)
class TestMinimumMaximum:
def test_minimum(self):
assert_warns(DeprecationWarning, np.ma.minimum, np.ma.array([1, 2]))
def test_maximum(self):
assert_warns(DeprecationWarning, np.ma.maximum, np.ma.array([1, 2]))
def test_axis_default(self):
# NumPy 1.13, 2017-05-06
data1d = np.ma.arange(6)
data2d = data1d.reshape(2, 3)
ma_min = np.ma.minimum.reduce
ma_max = np.ma.maximum.reduce
# check that the default axis is still None, but warns on 2d arrays
result = assert_warns(MaskedArrayFutureWarning, ma_max, data2d)
assert_equal(result, ma_max(data2d, axis=None))
result = assert_warns(MaskedArrayFutureWarning, ma_min, data2d)
assert_equal(result, ma_min(data2d, axis=None))
# no warnings on 1d, as both new and old defaults are equivalent
result = ma_min(data1d)
assert_equal(result, ma_min(data1d, axis=None))
assert_equal(result, ma_min(data1d, axis=0))
result = ma_max(data1d)
assert_equal(result, ma_max(data1d, axis=None))
assert_equal(result, ma_max(data1d, axis=0))
| bsd-3-clause |
oandrew/home-assistant | homeassistant/scripts/check_config.py | 6 | 10301 | """Script to ensure a configuration file exists."""
import argparse
import logging
import os
from collections import OrderedDict
from glob import glob
from platform import system
from unittest.mock import patch
from typing import Dict, List, Sequence
import homeassistant.bootstrap as bootstrap
import homeassistant.config as config_util
import homeassistant.loader as loader
import homeassistant.util.yaml as yaml
from homeassistant.exceptions import HomeAssistantError
REQUIREMENTS = ('colorlog>2.1,<3',)
if system() == 'Windows': # Ensure colorama installed for colorlog on Windows
REQUIREMENTS += ('colorama<=1',)
_LOGGER = logging.getLogger(__name__)
# pylint: disable=protected-access
MOCKS = {
'load': ("homeassistant.util.yaml.load_yaml", yaml.load_yaml),
'load*': ("homeassistant.config.load_yaml", yaml.load_yaml),
'get': ("homeassistant.loader.get_component", loader.get_component),
'secrets': ("homeassistant.util.yaml._secret_yaml", yaml._secret_yaml),
'except': ("homeassistant.bootstrap.async_log_exception",
bootstrap.async_log_exception)
}
SILENCE = (
'homeassistant.bootstrap.clear_secret_cache',
'homeassistant.core._LOGGER.info',
'homeassistant.loader._LOGGER.info',
'homeassistant.bootstrap._LOGGER.info',
'homeassistant.bootstrap._LOGGER.warning',
'homeassistant.util.yaml._LOGGER.debug',
)
PATCHES = {}
C_HEAD = 'bold'
ERROR_STR = 'General Errors'
def color(the_color, *args, reset=None):
"""Color helper."""
from colorlog.escape_codes import escape_codes, parse_colors
try:
if len(args) == 0:
assert reset is None, "You cannot reset if nothing is being printed"
return parse_colors(the_color)
return parse_colors(the_color) + ' '.join(args) + \
escape_codes[reset or 'reset']
except KeyError as k:
raise ValueError("Invalid color {} in {}".format(str(k), the_color))
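# Hedged examples of the helper above (escape sequences come from colorlog):
# color('bold_red', 'Failed config') -> bold red text followed by a reset code
# color('cyan') -> just the escape sequence for cyan, no reset appended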
def run(script_args: List) -> int:
"""Handle ensure config commandline script."""
parser = argparse.ArgumentParser(
description=("Check Home Assistant configuration."))
parser.add_argument(
'--script', choices=['check_config'])
parser.add_argument(
'-c', '--config',
default=config_util.get_default_config_dir(),
help="Directory that contains the Home Assistant configuration")
parser.add_argument(
'-i', '--info',
default=None,
help="Show a portion of the config")
parser.add_argument(
'-f', '--files',
action='store_true',
help="Show used configuration files")
parser.add_argument(
'-s', '--secrets',
action='store_true',
help="Show secret information")
args = parser.parse_args()
config_dir = os.path.join(os.getcwd(), args.config)
config_path = os.path.join(config_dir, 'configuration.yaml')
if not os.path.isfile(config_path):
print('Config does not exist:', config_path)
return 1
print(color('bold', "Testing configuration at", config_dir))
domain_info = []
if args.info:
domain_info = args.info.split(',')
res = check(config_path)
if args.files:
print(color(C_HEAD, 'yaml files'), '(used /',
color('red', 'not used') + ')')
# Python 3.5 supports recursive glob, but 3.4 does not
for yfn in sorted(glob(os.path.join(config_dir, '*.yaml')) +
glob(os.path.join(config_dir, '*/*.yaml'))):
the_color = '' if yfn in res['yaml_files'] else 'red'
print(color(the_color, '-', yfn))
if len(res['except']) > 0:
print(color('bold_white', 'Failed config'))
for domain, config in res['except'].items():
domain_info.append(domain)
print(' ', color('bold_red', domain + ':'),
color('red', '', reset='red'))
dump_dict(config, reset='red')
print(color('reset'))
if domain_info:
if 'all' in domain_info:
print(color('bold_white', 'Successful config (all)'))
for domain, config in res['components'].items():
print(' ', color(C_HEAD, domain + ':'))
dump_dict(config)
else:
print(color('bold_white', 'Successful config (partial)'))
for domain in domain_info:
if domain == ERROR_STR:
continue
print(' ', color(C_HEAD, domain + ':'))
dump_dict(res['components'].get(domain, None))
if args.secrets:
flatsecret = {}
for sfn, sdict in res['secret_cache'].items():
sss = []
for skey, sval in sdict.items():
if skey in flatsecret:
_LOGGER.error('Duplicated secrets in files %s and %s',
flatsecret[skey], sfn)
flatsecret[skey] = sfn
sss.append(color('green', skey) if skey in res['secrets']
else skey)
print(color(C_HEAD, 'Secrets from', sfn + ':'), ', '.join(sss))
print(color(C_HEAD, 'Used Secrets:'))
for skey, sval in res['secrets'].items():
print(' -', skey + ':', sval, color('cyan', '[from:', flatsecret
.get(skey, 'keyring') + ']'))
return 0
def check(config_path):
"""Perform a check by mocking hass load functions."""
res = {
'yaml_files': OrderedDict(), # yaml_files loaded
'secrets': OrderedDict(), # secret cache and secrets loaded
'except': OrderedDict(), # exceptions raised (with config)
'components': OrderedDict(), # successful components
'secret_cache': OrderedDict(),
}
# pylint: disable=unused-variable
def mock_load(filename):
"""Mock hass.util.load_yaml to save config files."""
res['yaml_files'][filename] = True
return MOCKS['load'][1](filename)
# pylint: disable=unused-variable
def mock_get(comp_name):
"""Mock hass.loader.get_component to replace setup & setup_platform."""
def mock_setup(*args):
"""Mock setup, only record the component name & config."""
assert comp_name not in res['components'], \
"Components should contain a list of platforms"
res['components'][comp_name] = args[1].get(comp_name)
return True
module = MOCKS['get'][1](comp_name)
if module is None:
# Ensure list
res['except'][ERROR_STR] = res['except'].get(ERROR_STR, [])
res['except'][ERROR_STR].append('{} not found: {}'.format(
'Platform' if '.' in comp_name else 'Component', comp_name))
return None
# Test if platform/component and overwrite setup
if '.' in comp_name:
module.setup_platform = mock_setup
if hasattr(module, 'async_setup_platform'):
del module.async_setup_platform
else:
module.setup = mock_setup
if hasattr(module, 'async_setup'):
del module.async_setup
return module
# pylint: disable=unused-variable
def mock_secrets(ldr, node):
"""Mock _get_secrets."""
try:
val = MOCKS['secrets'][1](ldr, node)
except HomeAssistantError:
val = None
res['secrets'][node.value] = val
return val
def mock_except(ex, domain, config, # pylint: disable=unused-variable
hass=None):
"""Mock bootstrap.log_exception."""
MOCKS['except'][1](ex, domain, config, hass)
res['except'][domain] = config.get(domain, config)
# Patches to skip functions
for sil in SILENCE:
PATCHES[sil] = patch(sil)
# Patches with local mock functions
for key, val in MOCKS.items():
# The * in the key is removed to find the mock_function (side_effect)
# This allows us to use one side_effect to patch multiple locations
mock_function = locals()['mock_' + key.replace('*', '')]
PATCHES[key] = patch(val[0], side_effect=mock_function)
# Start all patches
for pat in PATCHES.values():
pat.start()
# Ensure !secrets point to the patched function
yaml.yaml.SafeLoader.add_constructor('!secret', yaml._secret_yaml)
try:
bootstrap.from_config_file(config_path, skip_pip=True)
res['secret_cache'] = dict(yaml.__SECRET_CACHE)
except Exception as err: # pylint: disable=broad-except
print(color('red', 'Fatal error while loading config:'), str(err))
finally:
# Stop all patches
for pat in PATCHES.values():
pat.stop()
# Ensure !secrets point to the original function
yaml.yaml.SafeLoader.add_constructor('!secret', yaml._secret_yaml)
bootstrap.clear_secret_cache()
return res
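# Hedged usage sketch: check() is normally driven by run(), but the result
# dict documented above can also be inspected directly (the path below is a
# placeholder):
#
# res = check('/config/configuration.yaml')
# print(sorted(res['components'].keys()))
# print(res['except'])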
def dump_dict(layer, indent_count=3, listi=False, **kwargs):
"""Display a dict.
A friendly version of print yaml.yaml.dump(config).
"""
def line_src(this):
"""Display line config source."""
if hasattr(this, '__config_file__'):
return color('cyan', "[source {}:{}]"
.format(this.__config_file__, this.__line__ or '?'),
**kwargs)
return ''
def sort_dict_key(val):
"""Return the dict key for sorting."""
skey = str.lower(val[0])
if str(skey) == 'platform':
skey = '0'
return skey
indent_str = indent_count * ' '
if listi or isinstance(layer, list):
indent_str = indent_str[:-1] + '-'
if isinstance(layer, Dict):
for key, value in sorted(layer.items(), key=sort_dict_key):
if isinstance(value, dict) or isinstance(value, list):
print(indent_str, key + ':', line_src(value))
dump_dict(value, indent_count + 2)
else:
print(indent_str, key + ':', value)
indent_str = indent_count * ' '
if isinstance(layer, Sequence):
for i in layer:
if isinstance(i, dict):
dump_dict(i, indent_count + 2, True)
else:
print(' ', indent_str, i)
| mit |
phazel/pixelated-user-agent | service/test/unit/resources/test_keys_resources.py | 7 | 2316 | import json
import ast
from mockito import mock, when
from leap.keymanager import OpenPGPKey, KeyNotFound
from pixelated.resources.keys_resource import KeysResource
import twisted.trial.unittest as unittest
from twisted.web.test.requesthelper import DummyRequest
from twisted.internet import defer
from test.unit.resources import DummySite
class TestKeysResource(unittest.TestCase):
def setUp(self):
self.keymanager = mock()
self.web = DummySite(KeysResource(self.keymanager))
def test_returns_404_if_key_not_found(self):
request = DummyRequest(['/keys'])
request.addArg('search', 'some@inexistent.key')
when(self.keymanager).fetch_key('some@inexistent.key').thenReturn(defer.fail(KeyNotFound()))
d = self.web.get(request)
def assert_404(_):
self.assertEquals(404, request.code)
d.addCallback(assert_404)
return d
def test_returns_the_key_as_json_if_found(self):
request = DummyRequest(['/keys'])
request.addArg('search', 'some@key')
when(self.keymanager).fetch_key('some@key').thenReturn(defer.succeed(OpenPGPKey('some@key')))
d = self.web.get(request)
expected = {
"tags": ["keymanager-key"],
"fingerprint": '',
"private": False,
'sign_used': False,
'refreshed_at': 0,
"expiry_date": 0,
"address": 'some@key',
'encr_used': False,
'last_audited_at': 0,
'key_data': '',
'length': 0,
'key_id': '',
'validation': 'Weak_Chain',
'type': 'OpenPGPKey',
}
def assert_response(_):
actual = json.loads(ast.literal_eval(request.written[0]))
self.assertEquals(expected, actual)
d.addCallback(assert_response)
return d
def test_returns_unauthorized_if_key_is_private(self):
request = DummyRequest(['/keys'])
request.addArg('search', 'some@key')
when(self.keymanager).fetch_key('some@key').thenReturn(defer.succeed(OpenPGPKey('some@key', private=True)))
d = self.web.get(request)
def assert_response(_):
self.assertEquals(401, request.code)
d.addCallback(assert_response)
return d
| agpl-3.0 |
2013Commons/HUE-SHARK | desktop/core/ext-py/Django-1.2.3/django/contrib/gis/db/models/sql/aggregates.py | 309 | 1804 | from django.db.models.sql.aggregates import *
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.gis.db.models.sql.conversion import GeomField
class GeoAggregate(Aggregate):
# Default SQL template for spatial aggregates.
sql_template = '%(function)s(%(field)s)'
# Conversion class, if necessary.
conversion_class = None
# Flags for indicating the type of the aggregate.
is_extent = False
def __init__(self, col, source=None, is_summary=False, tolerance=0.05, **extra):
super(GeoAggregate, self).__init__(col, source, is_summary, **extra)
# Required by some Oracle aggregates.
self.tolerance = tolerance
# Can't use geographic aggregates on non-geometry fields.
if not isinstance(self.source, GeometryField):
raise ValueError('Geospatial aggregates only allowed on geometry fields.')
def as_sql(self, qn, connection):
"Return the aggregate, rendered as SQL."
if connection.ops.oracle:
self.extra['tolerance'] = self.tolerance
if hasattr(self.col, 'as_sql'):
field_name = self.col.as_sql(qn, connection)
elif isinstance(self.col, (list, tuple)):
field_name = '.'.join([qn(c) for c in self.col])
else:
field_name = self.col
sql_template, sql_function = connection.ops.spatial_aggregate_sql(self)
params = {
'function': sql_function,
'field': field_name
}
params.update(self.extra)
return sql_template % params
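# For illustration (table and column names are hypothetical): with the
# default sql_template '%(function)s(%(field)s)', a Collect aggregate over a
# "geom" column would render roughly as ST_Collect("app_model"."geom") on a
# PostGIS backend.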
class Collect(GeoAggregate):
pass
class Extent(GeoAggregate):
is_extent = '2D'
class Extent3D(GeoAggregate):
is_extent = '3D'
class MakeLine(GeoAggregate):
pass
class Union(GeoAggregate):
pass
| apache-2.0 |
cytsao/X-Informatics-1.3.0 | tests/functional/actions.py | 2 | 21562 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: psimakov@google.com (Pavel Simakov)
"""A collection of actions for testing Course Builder pages."""
import cgi
import functools
import logging
import os
import re
import urllib
import appengine_config
from controllers import sites
from controllers import utils
import main
from models import config
from tests import suite
from google.appengine.api import namespace_manager
# All URLs referred to from all the pages.
UNIQUE_URLS_FOUND = {}
BASE_HOOK_POINTS = [
'<!-- base.before_head_tag_ends -->',
'<!-- base.after_body_tag_begins -->',
'<!-- base.after_navbar_begins -->',
'<!-- base.before_navbar_ends -->',
'<!-- base.after_top_content_ends -->',
'<!-- base.after_main_content_ends -->',
'<!-- base.before_body_tag_ends -->']
UNIT_HOOK_POINTS = [
'<!-- unit.after_leftnav_begins -->',
'<!-- unit.before_leftnav_ends -->',
'<!-- unit.after_content_begins -->',
'<!-- unit.before_content_ends -->']
PREVIEW_HOOK_POINTS = [
'<!-- preview.after_top_content_ends -->',
'<!-- preview.after_main_content_ends -->']
class ShouldHaveFailedByNow(Exception):
"""Special exception raised when a prior method did not raise."""
pass
class TestBase(suite.AppEngineTestBase):
"""Contains methods common to all functional tests."""
last_request_url = None
def getApp(self): # pylint: disable-msg=g-bad-name
main.debug = True
sites.ApplicationRequestHandler.bind(main.namespaced_routes)
return main.app
def assert_default_namespace(self):
ns = namespace_manager.get_namespace()
if ns != appengine_config.DEFAULT_NAMESPACE_NAME:
raise Exception('Expected default namespace, found: %s' % ns)
def setUp(self): # pylint: disable-msg=g-bad-name
super(TestBase, self).setUp()
self.supports_editing = False
self.assert_default_namespace()
self.namespace = ''
self.base = '/'
# Reload all properties now to flush the values modified in other tests.
config.Registry.get_overrides(True)
def tearDown(self): # pylint: disable-msg=g-bad-name
self.assert_default_namespace()
super(TestBase, self).tearDown()
def canonicalize(self, href, response=None):
"""Create absolute URL using <base> if defined, self.base otherwise."""
if href.startswith('/') or utils.ApplicationHandler.is_absolute(href):
pass
else:
base = self.base
if response:
match = re.search(
r'<base href=[\'"]?([^\'" >]+)', response.body)
if match and not href.startswith('/'):
base = match.groups()[0]
if not base.endswith('/'):
base += '/'
href = '%s%s' % (base, href)
self.audit_url(href)
return href
def audit_url(self, url):
"""Record for audit purposes the URL we encountered."""
UNIQUE_URLS_FOUND[url] = True
def hook_response(self, response):
"""Modify response.goto() to compute URL using <base>, if defined."""
if response.status_int == 200:
self.check_response_hrefs(response)
self.last_request_url = self.canonicalize(response.request.path)
gotox = response.goto
def new_goto(href, method='get', **args):
return gotox(self.canonicalize(href), method, **args)
response.goto = new_goto
return response
def check_response_hrefs(self, response):
"""Check response page URLs are properly formatted/canonicalized."""
hrefs = re.findall(r'href=[\'"]?([^\'" >]+)', response.body)
srcs = re.findall(r'src=[\'"]?([^\'" >]+)', response.body)
for url in hrefs + srcs:
# We expect all internal URLs to be relative: 'asset/css/main.css',
# and use the <base> tag. All other URLs must be whitelisted below.
if url.startswith('/'):
absolute = url.startswith('//')
root = url == '/'
canonical = url.startswith(self.base)
allowed = url.startswith('/admin') or url.startswith('/_ah/')
if not (absolute or root or canonical or allowed):
raise Exception('Invalid reference \'%s\' in:\n%s' % (
url, response.body))
self.audit_url(self.canonicalize(url, response=response))
def get(self, url, **kwargs):
url = self.canonicalize(url)
logging.info('HTTP Get: %s', url)
response = self.testapp.get(url, **kwargs)
return self.hook_response(response)
def post(self, url, params, expect_errors=False):
url = self.canonicalize(url)
logging.info('HTTP Post: %s', url)
response = self.testapp.post(url, params, expect_errors=expect_errors)
return self.hook_response(response)
def put(self, url, params, expect_errors=False):
url = self.canonicalize(url)
logging.info('HTTP Put: %s', url)
response = self.testapp.put(url, params, expect_errors=expect_errors)
return self.hook_response(response)
def click(self, response, name):
logging.info('Link click: %s', name)
response = response.click(name)
return self.hook_response(response)
def submit(self, form):
logging.info('Form submit: %s', form)
response = form.submit()
return self.hook_response(response)
def assert_equals(actual, expected):
if expected != actual:
raise Exception('Expected \'%s\', does not match actual \'%s\'.' %
(expected, actual))
def to_unicode(text):
"""Converts text to Unicode if is not Unicode already."""
if not isinstance(text, unicode):
return unicode(text, 'utf-8')
return text
def assert_contains(needle, haystack, collapse_whitespace=False):
needle = to_unicode(needle)
haystack = to_unicode(haystack)
if collapse_whitespace:
haystack = ' '.join(haystack.replace('\n', ' ').split())
if needle not in haystack:
raise Exception('Can\'t find \'%s\' in \'%s\'.' % (needle, haystack))
def assert_contains_all_of(needles, haystack):
haystack = to_unicode(haystack)
for needle in needles:
needle = to_unicode(needle)
if needle not in haystack:
raise Exception(
'Can\'t find \'%s\' in \'%s\'.' % (needle, haystack))
def assert_does_not_contain(needle, haystack, collapse_whitespace=False):
needle = to_unicode(needle)
haystack = to_unicode(haystack)
if collapse_whitespace:
haystack = ' '.join(haystack.replace('\n', ' ').split())
if needle in haystack:
raise Exception('Found \'%s\' in \'%s\'.' % (needle, haystack))
def assert_contains_none_of(needles, haystack):
haystack = to_unicode(haystack)
for needle in needles:
needle = to_unicode(needle)
if needle in haystack:
raise Exception('Found \'%s\' in \'%s\'.' % (needle, haystack))
def assert_none_fail(browser, callbacks):
"""Invokes all callbacks and expects each one not to fail."""
for callback in callbacks:
callback(browser)
def assert_at_least_one_succeeds(callbacks):
"""Invokes all callbacks and expects at least one to succeed."""
for callback in callbacks:
try:
callback()
return True
except Exception: # pylint: disable-msg=broad-except
pass
raise Exception('All callbacks failed.')
def assert_all_fail(browser, callbacks):
"""Invokes all callbacks and expects each one to fail."""
for callback in callbacks:
try:
callback(browser)
raise ShouldHaveFailedByNow(
'Expected to fail: %s().' % callback.__name__)
except ShouldHaveFailedByNow as e:
raise e
except Exception:
pass
def login(email, is_admin=False):
os.environ['USER_EMAIL'] = email
os.environ['USER_ID'] = email
is_admin_value = '0'
if is_admin:
is_admin_value = '1'
os.environ['USER_IS_ADMIN'] = is_admin_value
def get_current_user_email():
email = os.environ['USER_EMAIL']
if not email:
raise Exception('No current user.')
return email
def logout():
del os.environ['USER_EMAIL']
del os.environ['USER_ID']
del os.environ['USER_IS_ADMIN']
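# Hedged sketch of the login/logout pattern the test helpers below rely on
# (the email address is a placeholder):
#
# login('student@example.com')
# register(browser, 'Test Student')
# ... exercise course pages ...
# logout()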
def register(browser, name):
"""Registers a new student with the given name."""
response = view_registration(browser)
response.form.set('form01', name)
response = browser.submit(response.form)
assert_equals(response.status_int, 302)
assert_contains(
'course#registration_confirmation', response.headers['location'])
check_profile(browser, name)
return response
def check_profile(browser, name):
response = view_my_profile(browser)
assert_contains('Email', response.body)
assert_contains(cgi.escape(name), response.body)
assert_contains(get_current_user_email(), response.body)
return response
def view_registration(browser):
response = browser.get('register')
check_personalization(browser, response)
assert_contains('What is your name?', response.body)
assert_contains_all_of([
'<!-- reg_form.additional_registration_fields -->'], response.body)
return response
def register_with_additional_fields(browser, name, data2, data3):
"""Registers a new student with customized registration form."""
response = browser.get('/')
assert_equals(response.status_int, 302)
response = view_registration(browser)
response.form.set('form01', name)
response.form.set('form02', data2)
response.form.set('form03', data3)
response = browser.submit(response.form)
assert_equals(response.status_int, 302)
assert_contains(
'course#registration_confirmation', response.headers['location'])
check_profile(browser, name)
def check_logout_link(response_body):
assert_contains(get_current_user_email(), response_body)
def check_login_link(response_body):
assert_contains('Login', response_body)
def check_personalization(browser, response):
"""Checks that the login/logout text is correct."""
sites.set_path_info(browser.last_request_url)
app_context = sites.get_course_for_current_request()
sites.unset_path_info()
browsable = app_context.get_environ()['course']['browsable']
if browsable:
callbacks = [
functools.partial(check_login_link, response.body),
functools.partial(check_logout_link, response.body)
]
assert_at_least_one_succeeds(callbacks)
else:
check_logout_link(response.body)
def view_preview(browser):
"""Views /preview page."""
response = browser.get('preview')
assert_contains(' the stakes are high.', response.body)
assert_contains(
'<li><p class="gcb-top-content">Pre-course assessment</p></li>',
response.body)
assert_contains_none_of(UNIT_HOOK_POINTS, response.body)
assert_contains_all_of(PREVIEW_HOOK_POINTS, response.body)
return response
def view_course(browser):
"""Views /course page."""
response = browser.get('course')
assert_contains(' the stakes are high.', response.body)
assert_contains('<a href="assessment?name=Pre">Pre-course assessment</a>',
response.body)
check_personalization(browser, response)
assert_contains_all_of(BASE_HOOK_POINTS, response.body)
assert_contains_none_of(UNIT_HOOK_POINTS, response.body)
assert_contains_none_of(PREVIEW_HOOK_POINTS, response.body)
return response
def view_unit(browser):
"""Views /unit page."""
response = browser.get('unit?unit=1&lesson=1')
assert_contains('Unit 1 - Introduction', response.body)
assert_contains('1.3 How search works', response.body)
assert_contains('1.6 Finding text on a web page', response.body)
assert_contains('https://www.youtube.com/embed/1ppwmxidyIE', response.body)
check_personalization(browser, response)
assert_contains_all_of(BASE_HOOK_POINTS, response.body)
assert_contains_all_of(UNIT_HOOK_POINTS, response.body)
assert_contains_none_of(PREVIEW_HOOK_POINTS, response.body)
return response
def view_activity(browser):
response = browser.get('activity?unit=1&lesson=2')
assert_contains('<script src="assets/js/activity-1.2.js"></script>',
response.body)
check_personalization(browser, response)
return response
def view_announcements(browser):
response = browser.get('announcements')
assert_equals(response.status_int, 200)
return response
def view_my_profile(browser):
response = browser.get('student/home')
assert_contains('Date enrolled', response.body)
check_personalization(browser, response)
return response
def view_forum(browser):
response = browser.get('forum')
assert_contains('document.getElementById("forum_embed").src =',
response.body)
check_personalization(browser, response)
return response
def view_assessments(browser):
for name in ['Pre', 'Mid', 'Fin']:
response = browser.get('assessment?name=%s' % name)
assert 'assets/js/assessment-%s.js' % name in response.body
assert_equals(response.status_int, 200)
check_personalization(browser, response)
def submit_assessment(browser, unit_id, args, presubmit_checks=True):
"""Submits an assessment."""
response = browser.get('assessment?name=%s' % unit_id)
if presubmit_checks:
assert_contains(
'<script src="assets/js/assessment-%s.js"></script>' % unit_id,
response.body)
js_response = browser.get('assets/js/assessment-%s.js' % unit_id)
assert_equals(js_response.status_int, 200)
# Extract XSRF token from the page.
match = re.search(r'assessmentXsrfToken = [\']([^\']+)', response.body)
assert match
xsrf_token = match.group(1)
args['xsrf_token'] = xsrf_token
response = browser.post('answer', args)
assert_equals(response.status_int, 200)
return response
def request_new_review(browser, unit_id, expected_status_code=302):
"""Requests a new assignment to review."""
response = browser.get('reviewdashboard?unit=%s' % unit_id)
assert_contains('Assignments for your review', response.body)
# Extract XSRF token from the page.
match = re.search(
r'<input type="hidden" name="xsrf_token"\s* value="([^"]*)">',
response.body)
assert match
xsrf_token = match.group(1)
args = {'xsrf_token': xsrf_token}
expect_errors = (expected_status_code not in [200, 302])
response = browser.post(
'reviewdashboard?unit=%s' % unit_id, args, expect_errors=expect_errors)
assert_equals(response.status_int, expected_status_code)
if expected_status_code == 302:
assert_equals(response.status_int, expected_status_code)
assert_contains(
'review?unit=%s' % unit_id, response.location)
response = browser.get(response.location)
assert_contains('Assignment to review', response.body)
return response
def view_review(browser, unit_id, review_step_key, expected_status_code=200):
"""View a review page."""
response = browser.get(
'review?unit=%s&key=%s' % (unit_id, review_step_key),
expect_errors=(expected_status_code != 200))
assert_equals(response.status_int, expected_status_code)
if expected_status_code == 200:
assert_contains('Assignment to review', response.body)
return response
def submit_review(
browser, unit_id, review_step_key, args, presubmit_checks=True):
"""Submits a review."""
response = browser.get(
'review?unit=%s&key=%s' % (unit_id, review_step_key))
if presubmit_checks:
assert_contains(
'<script src="assets/js/review-%s.js"></script>' % unit_id,
response.body)
js_response = browser.get('assets/js/review-%s.js' % unit_id)
assert_equals(js_response.status_int, 200)
# Extract XSRF token from the page.
match = re.search(r'assessmentXsrfToken = [\']([^\']+)', response.body)
assert match
xsrf_token = match.group(1)
args['xsrf_token'] = xsrf_token
args['key'] = review_step_key
args['unit_id'] = unit_id
response = browser.post('review', args)
assert_equals(response.status_int, 200)
return response
def add_reviewer(browser, unit_id, reviewee_email, reviewer_email):
"""Adds a reviewer to a submission."""
url_params = {
'action': 'edit_assignment',
'reviewee_id': reviewee_email,
'unit_id': unit_id,
}
response = browser.get('/dashboard?%s' % urllib.urlencode(url_params))
# Extract XSRF token from the page.
match = re.search(
r'<input type="hidden" name="xsrf_token"\s* value="([^"]*)">',
response.body)
assert match
xsrf_token = match.group(1)
args = {
'xsrf_token': xsrf_token,
'reviewer_id': reviewer_email,
'reviewee_id': reviewee_email,
'unit_id': unit_id,
}
response = browser.post('/dashboard?action=add_reviewer', args)
return response
def change_name(browser, new_name):
response = browser.get('student/home')
response.form.set('name', new_name)
response = browser.submit(response.form)
assert_equals(response.status_int, 302)
check_profile(browser, new_name)
def unregister(browser):
response = browser.get('student/home')
response = browser.click(response, 'Unenroll')
assert_contains('to unenroll from', response.body)
browser.submit(response.form)
class Permissions(object):
"""Defines who can see what."""
@classmethod
def get_browsable_pages(cls):
"""Returns all pages that can be accessed by a logged-out user."""
return [view_announcements, view_forum, view_course, view_unit,
view_assessments, view_activity]
@classmethod
def get_nonbrowsable_pages(cls):
"""Returns all non-browsable pages."""
return [view_preview, view_my_profile, view_registration]
@classmethod
def get_logged_out_allowed_pages(cls):
"""Returns all pages that a logged-out user can see."""
return [view_announcements, view_preview]
@classmethod
def get_logged_out_denied_pages(cls):
"""Returns all pages that a logged-out user can't see."""
return [view_forum, view_course, view_assessments,
view_unit, view_activity, view_my_profile, view_registration]
@classmethod
def get_enrolled_student_allowed_pages(cls):
"""Returns all pages that a logged-in, enrolled student can see."""
return [view_announcements, view_forum, view_course,
view_assessments, view_unit, view_activity, view_my_profile]
@classmethod
def get_enrolled_student_denied_pages(cls):
"""Returns all pages that a logged-in, enrolled student can't see."""
return [view_registration, view_preview]
@classmethod
def get_unenrolled_student_allowed_pages(cls):
"""Returns all pages that a logged-in, unenrolled student can see."""
return [view_announcements, view_registration, view_preview]
@classmethod
def get_unenrolled_student_denied_pages(cls):
"""Returns all pages that a logged-in, unenrolled student can't see."""
pages = Permissions.get_enrolled_student_allowed_pages()
for allowed in Permissions.get_unenrolled_student_allowed_pages():
if allowed in pages:
pages.remove(allowed)
return pages
@classmethod
def assert_can_browse(cls, browser):
"""Check that pages for a browsing user are visible."""
assert_none_fail(browser, Permissions.get_browsable_pages())
assert_all_fail(browser, Permissions.get_nonbrowsable_pages())
@classmethod
def assert_logged_out(cls, browser):
"""Check that only pages for a logged-out user are visible."""
assert_none_fail(browser, Permissions.get_logged_out_allowed_pages())
assert_all_fail(browser, Permissions.get_logged_out_denied_pages())
@classmethod
def assert_enrolled(cls, browser):
"""Check that only pages for an enrolled student are visible."""
assert_none_fail(
browser, Permissions.get_enrolled_student_allowed_pages())
assert_all_fail(
browser, Permissions.get_enrolled_student_denied_pages())
@classmethod
def assert_unenrolled(cls, browser):
"""Check that only pages for an unenrolled student are visible."""
assert_none_fail(
browser, Permissions.get_unenrolled_student_allowed_pages())
assert_all_fail(
browser, Permissions.get_unenrolled_student_denied_pages())
| apache-2.0 |
emijrp/youtube-dl | youtube_dl/extractor/bpb.py | 145 | 1364 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class BpbIE(InfoExtractor):
IE_DESC = 'Bundeszentrale für politische Bildung'
_VALID_URL = r'http://www\.bpb\.de/mediathek/(?P<id>[0-9]+)/'
_TEST = {
'url': 'http://www.bpb.de/mediathek/297/joachim-gauck-zu-1989-und-die-erinnerung-an-die-ddr',
'md5': '0792086e8e2bfbac9cdf27835d5f2093',
'info_dict': {
'id': '297',
'ext': 'mp4',
'title': 'Joachim Gauck zu 1989 und die Erinnerung an die DDR',
'description': 'Joachim Gauck, erster Beauftragter für die Stasi-Unterlagen, spricht auf dem Geschichtsforum über die friedliche Revolution 1989 und eine "gewisse Traurigkeit" im Umgang mit der DDR-Vergangenheit.'
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
r'<h2 class="white">(.*?)</h2>', webpage, 'title')
video_url = self._html_search_regex(
r'(http://film\.bpb\.de/player/dokument_[0-9]+\.mp4)',
webpage, 'video URL')
return {
'id': video_id,
'url': video_url,
'title': title,
'description': self._og_search_description(webpage),
}
| unlicense |
nishad-jobsglobal/odoo-marriot | openerp/addons/sync_mail_multi_attach/controllers/__init__.py | 1 | 1157 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
# Copyright (C) 2011-today Synconics Technologies Pvt. Ltd. (<http://www.synconics.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import main
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 |
wdzhou/mantid | scripts/Vates/Diffraction_Workflow.py | 3 | 4117 | # pylint: disable=invalid-name
# Basic parameters for Triphylite Crystal
# Name of the workspaces to create
import mantid.simpleapi as mantid
ws_name = "TOPAZ_3132"
filename = ws_name + "_event.nxs"
ws = mantid.LoadEventNexus(Filename=filename, FilterByTofMin=3000, FilterByTofMax=16000)
# ------------------------------------------------------------------------------------------------------------------------------------------
# Part 1. Basic Reduction
# Spherical Absorption and Lorentz Corrections
ws = mantid.AnvredCorrection(InputWorkspace=ws, LinearScatteringCoef=0.451, LinearAbsorptionCoef=0.993, Radius=0.14)
# Convert to Q space
LabQ = mantid.ConvertToDiffractionMDWorkspace(InputWorkspace=ws, LorentzCorrection='0',
OutputDimensions='Q (lab frame)', SplitInto=2, SplitThreshold=150)
# Find peaks
PeaksLattice = mantid.FindPeaksMD(InputWorkspace=LabQ, MaxPeaks=100)
# 3d integration to centroid peaks
PeaksLattice = mantid.CentroidPeaksMD(InputWorkspace=LabQ,
PeakRadius=0.12, PeaksWorkspace=PeaksLattice)
# Find the UB matrix using the peaks and known lattice parameters
mantid.FindUBUsingLatticeParameters(PeaksWorkspace=PeaksLattice, a=10.3522, b=6.0768, c=4.7276,
alpha=90, beta=90, gamma=90, NumInitial=20, Tolerance=0.12)
# And index to HKL
mantid.IndexPeaks(PeaksWorkspace=PeaksLattice, Tolerance=0.12)
# Integrate peaks in Q space using spheres
PeaksLattice_Integrated = mantid.IntegratePeaksMD(InputWorkspace=LabQ, PeakRadius=0.12,
BackgroundOuterRadius=0.18, BackgroundInnerRadius=0.15,
PeaksWorkspace=PeaksLattice)
# Save for SHELX
mantid.SaveHKL(InputWorkspace=PeaksLattice, Filename=ws_name + '.hkl')
# ------------------------------------------------------------------------------------------------------------------------------------------
# Part 2. Alternative/Advanced Processing Steps
# Find peaks again for FFT
PeaksLatticeFFT = mantid.FindPeaksMD(InputWorkspace=LabQ, MaxPeaks=100)
# 3d integration to centroid peaks
PeaksLatticeFFT = mantid.CentroidPeaksMD(InputWorkspace=LabQ,
PeakRadius=0.12, PeaksWorkspace=PeaksLatticeFFT)
# Find the UB matrix using FFT
mantid.FindUBUsingFFT(PeaksWorkspace=PeaksLatticeFFT, MinD=3.0, MaxD=14.0)
# And index to HKL
mantid.IndexPeaks(PeaksWorkspace=PeaksLatticeFFT, Tolerance=0.12)
# Integrate peaks in Q space using spheres
PeaksLatticeFFT = mantid.IntegratePeaksMD(InputWorkspace=LabQ, PeakRadius=0.12,
BackgroundOuterRadius=0.18, BackgroundInnerRadius=0.15,
PeaksWorkspace=PeaksLatticeFFT)
# Save for SHELX
mantid.SaveHKL(InputWorkspace=PeaksLatticeFFT, Filename=ws_name + '.hkl')
# ------------------------------------------------------------------------------------------------------------------------------------------
# Part 3. Utilising the UB
# Copy the UB matrix back to the original workspace
mantid.CopySample(InputWorkspace=PeaksLattice, OutputWorkspace=ws,
CopyName='0', CopyMaterial='0', CopyEnvironment='0', CopyShape='0', CopyLattice=1)
# Convert to reciprocal space, in the sample frame
HKL = mantid.ConvertToDiffractionMDWorkspace(InputWorkspace=ws,
OutputDimensions='HKL', LorentzCorrection='0', SplitInto='2',
SplitThreshold='150')
# ------------------------------------------------------------------------------------------------------------------------------------------
# Part 4. Displaying
# Bin to a regular grid
Binned = mantid.BinMD(InputWorkspace=HKL, AlignedDim0='[H,0,0], -15, 5, 150', AlignedDim1='[0,K,0], -0, 10, 50',
AlignedDim2='[0,0,L], 0, 12, 150')
# Show in slice Viewer
sv = mantid.plotSlice(Binned, xydim=('[H,0,0]', '[0,0,L]'), slicepoint=[0, +9, 0], colorscalelog=True)
sv.setColorMapBackground(0, 0, 0)
| gpl-3.0 |
ctrezevant/doorMan | Applications/Command Line/doorctl.py | 1 | 1846 | #!/usr/bin/env python
import urllib2, json, sys
CONFIG = {
'API_KEY': ' ',
'API_HOST': 'http://<ip>',
'DOOR_TARGET': 0
}
def main():
if 'open' in sys.argv:
print 'sent open: %s' % sendCmd('/set/open')
if 'close' in sys.argv:
print 'sent close: %s' % sendCmd('/set/close')
if 'cycle' in sys.argv:
print 'sent cycle: %s' % sendCmd('/set/cycle')
if 'lockout' in sys.argv:
print 'sent lockout: %s' % sendCmd('/set/lockout')
if 'status' in sys.argv:
checkStatus()
if 'state' in sys.argv:
checkStatus()
# 'open' or ... is always truthy; test membership for each command instead
if any(cmd in sys.argv for cmd in ('open', 'close', 'cycle', 'lockout', 'status', 'state')):
sys.exit(0)
print "usage:\n doorctl open|close|cycle|lockout|status|help"
def checkStatus():
ul2 = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=0))
api_data = {
'method': '/get/state',
'door_id': CONFIG['DOOR_TARGET'],
'api_key': CONFIG['API_KEY']
}
postData = json.dumps(api_data)
try:
api_response = json.loads(ul2.open(CONFIG['API_HOST'], postData).read())
if(api_response['state'] == 0):
response_text = 'open'
else:
response_text = 'closed'
if(api_response['lockout']):
response_text += ' and locked'
except Exception:
response_text = "query state failed"
print response_text
def sendCmd(cmd):
ul2 = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=0))
api_data = {
'method': cmd,
'door_id': CONFIG['DOOR_TARGET'],
'api_key': CONFIG['API_KEY']
}
postData = json.dumps(api_data)
try:
api_response = json.loads(ul2.open(CONFIG['API_HOST'], postData).read())
except Exception:
api_response = {'command_sent': 'failed'}
return api_response['command_sent']
if __name__ == "__main__":
main()
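# Example invocations (the door id and key are taken from CONFIG above):
# $ ./doorctl.py status
# $ ./doorctl.py open
# $ ./doorctl.py lockout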
| mit |
fengshao0907/vitess | test/queryservice_test.py | 1 | 2631 | #!/usr/bin/env python
import logging
import optparse
import traceback
import unittest
import sys
import os
import utils
import framework
from queryservice_tests import cache_tests
from queryservice_tests import nocache_tests
from queryservice_tests import stream_tests
from queryservice_tests import status_tests
from queryservice_tests import test_env
from mysql_flavor import set_mysql_flavor
from protocols_flavor import set_protocols_flavor
from topo_flavor.server import set_topo_server_flavor
def main():
parser = optparse.OptionParser(usage='usage: %prog [options] [test_names]')
parser.add_option('-m', '--memcache', action='store_true', default=False,
                    help='starts a memcached, and tests rowcache')
parser.add_option(
'-e', '--env', default='vttablet',
help='Environment that will be used. Valid options: vttablet, vtocc')
utils.add_options(parser)
(options, args) = parser.parse_args()
logging.getLogger().setLevel(logging.ERROR)
utils.set_options(options)
run_tests(options, args)
def run_tests(options, args):
suite = unittest.TestSuite()
if args:
if args[0] == 'teardown':
test_env.TestEnv(options.env).tearDown()
exit(0)
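    # Dispatch each requested test name to whichever test class defines it;
    # the rowcache tests are only eligible when --memcache was given.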
for arg in args:
if hasattr(nocache_tests.TestNocache, arg):
suite.addTest(nocache_tests.TestNocache(arg))
elif hasattr(stream_tests.TestStream, arg):
suite.addTest(stream_tests.TestStream(arg))
elif hasattr(cache_tests.TestCache, arg) and options.memcache:
suite.addTest(cache_tests.TestCache(arg))
elif hasattr(cache_tests.TestWillNotBeCached, arg) and options.memcache:
suite.addTest(cache_tests.TestWillNotBeCached(arg))
else:
raise Exception(arg, 'not found in tests')
else:
modules = [nocache_tests, stream_tests, status_tests]
if options.memcache:
modules.append(cache_tests)
for m in modules:
suite.addTests(unittest.TestLoader().loadTestsFromModule(m))
env = test_env.TestEnv(options.env)
try:
env.memcache = options.memcache
env.setUp()
print 'Starting queryservice_test.py: %s' % options.env
sys.stdout.flush()
framework.TestCase.setenv(env)
result = unittest.TextTestRunner(
verbosity=options.verbose, failfast=True).run(suite)
if not result.wasSuccessful():
raise Exception('test failures')
finally:
if not options.skip_teardown:
env.tearDown()
if options.keep_logs:
print('Leaving temporary files behind (--keep-logs), please '
'clean up before next run: ' + os.environ['VTDATAROOT'])
if __name__ == '__main__':
main()
| bsd-3-clause |
kawasaki2013/python-for-android-x86 | python-build/python-libs/gdata/src/atom/data.py | 136 | 8060 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import atom.core
ATOM_TEMPLATE = '{http://www.w3.org/2005/Atom}%s'
APP_TEMPLATE_V1 = '{http://purl.org/atom/app#}%s'
APP_TEMPLATE_V2 = '{http://www.w3.org/2007/app}%s'
class Name(atom.core.XmlElement):
"""The atom:name element."""
_qname = ATOM_TEMPLATE % 'name'
class Email(atom.core.XmlElement):
"""The atom:email element."""
_qname = ATOM_TEMPLATE % 'email'
class Uri(atom.core.XmlElement):
"""The atom:uri element."""
_qname = ATOM_TEMPLATE % 'uri'
class Person(atom.core.XmlElement):
"""A foundation class which atom:author and atom:contributor extend.
A person contains information like name, email address, and web page URI for
an author or contributor to an Atom feed.
"""
name = Name
email = Email
uri = Uri
class Author(Person):
"""The atom:author element.
An author is a required element in Feed unless each Entry contains an Author.
"""
_qname = ATOM_TEMPLATE % 'author'
class Contributor(Person):
"""The atom:contributor element."""
_qname = ATOM_TEMPLATE % 'contributor'
class Link(atom.core.XmlElement):
"""The atom:link element."""
_qname = ATOM_TEMPLATE % 'link'
href = 'href'
rel = 'rel'
type = 'type'
hreflang = 'hreflang'
title = 'title'
length = 'length'
class Generator(atom.core.XmlElement):
"""The atom:generator element."""
_qname = ATOM_TEMPLATE % 'generator'
uri = 'uri'
version = 'version'
class Text(atom.core.XmlElement):
"""A foundation class from which atom:title, summary, etc. extend.
This class should never be instantiated.
"""
type = 'type'
class Title(Text):
"""The atom:title element."""
_qname = ATOM_TEMPLATE % 'title'
class Subtitle(Text):
"""The atom:subtitle element."""
_qname = ATOM_TEMPLATE % 'subtitle'
class Rights(Text):
"""The atom:rights element."""
_qname = ATOM_TEMPLATE % 'rights'
class Summary(Text):
"""The atom:summary element."""
_qname = ATOM_TEMPLATE % 'summary'
class Content(Text):
"""The atom:content element."""
_qname = ATOM_TEMPLATE % 'content'
src = 'src'
class Category(atom.core.XmlElement):
"""The atom:category element."""
_qname = ATOM_TEMPLATE % 'category'
term = 'term'
scheme = 'scheme'
label = 'label'
class Id(atom.core.XmlElement):
"""The atom:id element."""
_qname = ATOM_TEMPLATE % 'id'
class Icon(atom.core.XmlElement):
"""The atom:icon element."""
_qname = ATOM_TEMPLATE % 'icon'
class Logo(atom.core.XmlElement):
"""The atom:logo element."""
_qname = ATOM_TEMPLATE % 'logo'
class Draft(atom.core.XmlElement):
"""The app:draft element which indicates if this entry should be public."""
_qname = (APP_TEMPLATE_V1 % 'draft', APP_TEMPLATE_V2 % 'draft')
class Control(atom.core.XmlElement):
"""The app:control element indicating restrictions on publication.
The APP control element may contain a draft element indicating whether or
not this entry should be publicly available.
"""
_qname = (APP_TEMPLATE_V1 % 'control', APP_TEMPLATE_V2 % 'control')
draft = Draft
class Date(atom.core.XmlElement):
"""A parent class for atom:updated, published, etc."""
class Updated(Date):
"""The atom:updated element."""
_qname = ATOM_TEMPLATE % 'updated'
class Published(Date):
"""The atom:published element."""
_qname = ATOM_TEMPLATE % 'published'
class LinkFinder(object):
"""An "interface" providing methods to find link elements
Entry elements often contain multiple links which differ in the rel
attribute or content type. Often, developers are interested in a specific
type of link so this class provides methods to find specific classes of
links.
This class is used as a mixin in Atom entries and feeds.
"""
def find_url(self, rel):
"""Returns the URL in a link with the desired rel value."""
for link in self.link:
if link.rel == rel and link.href:
return link.href
return None
FindUrl = find_url
def get_link(self, rel):
"""Returns a link object which has the desired rel value.
If you are interested in the URL instead of the link object,
consider using find_url instead.
"""
for link in self.link:
if link.rel == rel and link.href:
return link
return None
GetLink = get_link
def find_self_link(self):
"""Find the first link with rel set to 'self'
Returns:
A str containing the link's href or None if none of the links had rel
equal to 'self'
"""
return self.find_url('self')
FindSelfLink = find_self_link
def get_self_link(self):
return self.get_link('self')
GetSelfLink = get_self_link
def find_edit_link(self):
return self.find_url('edit')
FindEditLink = find_edit_link
def get_edit_link(self):
return self.get_link('edit')
GetEditLink = get_edit_link
def find_edit_media_link(self):
link = self.find_url('edit-media')
# Search for media-edit as well since Picasa API used media-edit instead.
if link is None:
return self.find_url('media-edit')
return link
FindEditMediaLink = find_edit_media_link
def get_edit_media_link(self):
link = self.get_link('edit-media')
if link is None:
return self.get_link('media-edit')
return link
GetEditMediaLink = get_edit_media_link
def find_next_link(self):
return self.find_url('next')
FindNextLink = find_next_link
def get_next_link(self):
return self.get_link('next')
GetNextLink = get_next_link
def find_license_link(self):
return self.find_url('license')
FindLicenseLink = find_license_link
def get_license_link(self):
return self.get_link('license')
GetLicenseLink = get_license_link
def find_alternate_link(self):
return self.find_url('alternate')
FindAlternateLink = find_alternate_link
def get_alternate_link(self):
return self.get_link('alternate')
GetAlternateLink = get_alternate_link
class FeedEntryParent(atom.core.XmlElement, LinkFinder):
"""A super class for atom:feed and entry, contains shared attributes"""
author = [Author]
category = [Category]
contributor = [Contributor]
id = Id
link = [Link]
rights = Rights
title = Title
updated = Updated
def __init__(self, atom_id=None, text=None, *args, **kwargs):
if atom_id is not None:
self.id = atom_id
atom.core.XmlElement.__init__(self, text=text, *args, **kwargs)
class Source(FeedEntryParent):
"""The atom:source element."""
_qname = ATOM_TEMPLATE % 'source'
generator = Generator
icon = Icon
logo = Logo
subtitle = Subtitle
class Entry(FeedEntryParent):
"""The atom:entry element."""
_qname = ATOM_TEMPLATE % 'entry'
content = Content
published = Published
source = Source
summary = Summary
control = Control
class Feed(Source):
_qname = ATOM_TEMPLATE % 'feed'
entry = [Entry]
class ExtensionElement(atom.core.XmlElement):
"""Provided for backwards compatibility to the v1 atom.ExtensionElement."""
def __init__(self, tag=None, namespace=None, attributes=None,
children=None, text=None, *args, **kwargs):
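    # Build the qualified name in Clark notation ('{namespace}tag'), the form
    # ElementTree-style APIs use to address namespaced elements.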
if namespace:
self._qname = '{%s}%s' % (namespace, tag)
else:
self._qname = tag
self.children = children or []
self.attributes = attributes or {}
self.text = text
_BecomeChildElement = atom.core.XmlElement._become_child
| apache-2.0 |
priyaganti/rockstor-core | src/rockstor/storageadmin/views/clone_helpers.py | 1 | 2535 | """
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from storageadmin.models import (Share, Snapshot)
from storageadmin.util import handle_exception
from fs.btrfs import (add_clone, share_id, update_quota)
from rest_framework.response import Response
from storageadmin.serializers import ShareSerializer
import re
from django.conf import settings
def create_clone(share, new_name, request, logger, snapshot=None):
# if snapshot is None, create clone of the share.
# If it's not, then clone it.
if (re.match(settings.SHARE_REGEX + '$', new_name) is None):
e_msg = ('Clone name is invalid. It must start with a letter and can'
' contain letters, digits, _, . and - characters')
handle_exception(Exception(e_msg), request)
if (Share.objects.filter(name=new_name).exists()):
e_msg = ('Another Share with name: %s already exists.' % new_name)
handle_exception(Exception(e_msg), request)
if (Snapshot.objects.filter(share=share, name=new_name).exists()):
e_msg = ('Snapshot with name: %s already exists for the '
'share: %s. Choose a different name' %
(new_name, share.name))
handle_exception(Exception(e_msg), request)
try:
share_name = share.subvol_name
snap = None
if (snapshot is not None):
snap = snapshot.real_name
add_clone(share.pool, share_name, new_name, snapshot=snap)
snap_id = share_id(share.pool, new_name)
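        # btrfs keeps per-subvolume quotas in level-0 qgroups, addressed as
        # '0/<subvolume id>'; share.size appears to be stored in KiB, hence
        # the * 1024 scaling to bytes below.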
qgroup_id = ('0/%s' % snap_id)
update_quota(share.pool, qgroup_id, share.size * 1024)
new_share = Share(pool=share.pool, qgroup=qgroup_id, name=new_name,
size=share.size, subvol_name=new_name)
new_share.save()
return Response(ShareSerializer(new_share).data)
except Exception as e:
handle_exception(e, request)
| gpl-3.0 |
javachengwc/hue | apps/beeswax/src/beeswax/migrations/0003_auto__add_field_queryhistory_server_name__add_field_queryhistory_serve.py | 40 | 7185 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'QueryHistory.server_name'
db.add_column('beeswax_queryhistory', 'server_name', self.gf('django.db.models.fields.CharField')(default='', max_length=128), keep_default=False)
# Adding field 'QueryHistory.server_host'
db.add_column('beeswax_queryhistory', 'server_host', self.gf('django.db.models.fields.CharField')(default='', max_length=128), keep_default=False)
# Adding field 'QueryHistory.server_port'
db.add_column('beeswax_queryhistory', 'server_port', self.gf('django.db.models.fields.SmallIntegerField')(default=0), keep_default=False)
# Changing field 'QueryHistory.query'
db.alter_column('beeswax_queryhistory', 'query', self.gf('django.db.models.fields.TextField')())
def backwards(self, orm):
# Deleting field 'QueryHistory.server_name'
db.delete_column('beeswax_queryhistory', 'server_name')
# Deleting field 'QueryHistory.server_host'
db.delete_column('beeswax_queryhistory', 'server_host')
# Deleting field 'QueryHistory.server_port'
db.delete_column('beeswax_queryhistory', 'server_port')
# Changing field 'QueryHistory.query'
db.alter_column('beeswax_queryhistory', 'query', self.gf('django.db.models.fields.CharField')(max_length=1024))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'beeswax.metainstall': {
'Meta': {'object_name': 'MetaInstall'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'installed_example': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'beeswax.queryhistory': {
'Meta': {'object_name': 'QueryHistory'},
'design': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beeswax.SavedQuery']", 'null': 'True'}),
'has_results': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_state': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'log_context': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'notify': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'query': ('django.db.models.fields.TextField', [], {}),
'server_host': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}),
'server_id': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}),
'server_port': ('django.db.models.fields.SmallIntegerField', [], {'default': "''"}),
'submission_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'beeswax.savedquery': {
'Meta': {'object_name': 'SavedQuery'},
'data': ('django.db.models.fields.TextField', [], {'max_length': '65536'}),
'desc': ('django.db.models.fields.TextField', [], {'max_length': '1024'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auto': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'mtime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'type': ('django.db.models.fields.IntegerField', [], {})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['beeswax']
| apache-2.0 |
lpredova/pybookie | server/sources/footbal_db.py | 1 | 3991 | # coding=utf-8
import json
import os
class FootballDB:
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
groups_file = BASE_DIR + '/sources/groups.json'
wc_history_file = BASE_DIR + '/sources/wc_history'
wc_team_file = BASE_DIR + '/sources/squads/'
top_teams = ['RealMadrid(ESP)', 'Barcelona(ESP)', 'Chelsea(ENG)', 'ManchesterCity(ENG)', 'ParisSaint-Germain(FRA)',
'BayernMunich(GER)', 'Internazionale(ITA)', 'Napoli(ITA)', 'ManchesterUnited(ENG)', 'Arsenal(ENG)',
'Liverpool(ENG)', 'Juventus(ITA)', 'BorussiaDortmund(GER)', 'AtléticoMadrid(ESP)']
def __init__(self):
pass
@staticmethod
def get_team_by_id(team_id):
data = json.loads(FootballDB.get_games())
result = None
for group in data:
for team in group['teams']:
if int(team['id']) == int(team_id):
result = team['team']
return result
@staticmethod
def get_ranking(team_name):
return int(FootballDB.get_wc_history(team_name, 0))
@staticmethod
def get_wc_games_played(team_name):
return int(FootballDB.get_wc_history(team_name, 2))
@staticmethod
def get_won_wc_games_played(team_name):
return int(FootballDB.get_wc_history(team_name, 3))
@staticmethod
def get_draw_wc_games_played(team_name):
return int(FootballDB.get_wc_history(team_name, 4))
@staticmethod
def get_lost_wc_games_played(team_name):
return int(FootballDB.get_wc_history(team_name, 5))
@staticmethod
def get_goal_difference_wc_games_played(team_name):
gd = FootballDB.get_wc_history(team_name, 6)
gd = gd.split(':')
goals_for = int(gd[0])
goals_against = int(gd[1])
return goals_for - goals_against
@staticmethod
def get_wc_points(team_name):
return int(FootballDB.get_wc_history(team_name, 7))
@staticmethod
def get_wc_participations(team_name):
return int(FootballDB.get_wc_history(team_name, 8))
@staticmethod
def get_wc_titles(team_name):
titles = FootballDB.get_wc_history(team_name, 9)
        try:
            # The titles column may carry annotation after the count (e.g.
            # years won), so keep only the leading digit; the original
            # isalpha()-then-int() test could never return a valid count.
            if titles and str(titles)[0].isdigit():
                return int(str(titles)[0])
            else:
                return 0
        except Exception:
            return 0
@staticmethod
def get_wc_history(team, result_row_index):
path = FootballDB.wc_history_file
if os.path.isfile(path):
f = open(path)
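            # Data rows in the history file look like
            # 'rank | Team | ... | GF:GA | points | ...' ('|'-separated);
            # header and separator lines do not start with a digit.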
for line in f:
if line[0].isdigit():
row = line.replace('\n', '')
row = row.replace(' ', '')
row = row.split('|')
if row[1] == team.replace(' ', ''):
f.close()
try:
return row[result_row_index]
except BaseException:
return 0
@staticmethod
def get_wc_team_player_ratings(team):
path = '%s%s.txt' % (FootballDB.wc_team_file, (team.replace(' ', '-')))
path = path.lower()
team_rating = 0
if os.path.isfile(path):
f = open(path)
for line in f:
try:
row = line.split('##')
row = row[1].replace(' ', '').split(',')
team_rating += int(row[0])
team_name = row[1].replace('\n', '')
if team_name in FootballDB.top_teams:
team_rating += 10
except Exception:
pass
return team_rating
@staticmethod
def get_games():
data = None
path = FootballDB.groups_file
if os.path.isfile(path):
with open(path, 'r') as football_teams:
data = football_teams.read().replace('\n', '')
return data
| apache-2.0 |
repotvsupertuga/tvsupertuga.repository | script.module.streamlink.base/resources/lib/streamlink/plugins/viasat.py | 5 | 4436 | import re
from streamlink import NoStreamsError
from streamlink.exceptions import PluginError
from streamlink.plugin import Plugin
from streamlink.plugin.api import StreamMapper, validate
from streamlink.stream import HDSStream, HLSStream, RTMPStream
from streamlink.utils import rtmpparse
STREAM_API_URL = "https://playapi.mtgx.tv/v3/videos/stream/{0}"
_swf_url_re = re.compile(r"data-flashplayer-url=\"([^\"]+)\"")
_player_data_re = re.compile(r"window.fluxData\s*=\s*JSON.parse\(\"(.+)\"\);")
_stream_schema = validate.Schema(
validate.any(
None,
validate.all({"msg": validate.text}),
validate.all({
"streams": validate.all(
{validate.text: validate.any(validate.text, int, None)},
validate.filter(lambda k, v: isinstance(v, validate.text))
)
}, validate.get("streams"))
)
)
class Viasat(Plugin):
"""Streamlink Plugin for Viasat"""
_iframe_re = re.compile(r"""<iframe.+src=["'](?P<url>[^"']+)["'].+allowfullscreen""")
_image_re = re.compile(r"""<meta\sproperty=["']og:image["']\scontent=".+/(?P<stream_id>\d+)/[^/]+\.jpg""")
_url_re = re.compile(r"""https?://(?:www\.)?
(?:
juicyplay\.dk
|
play\.nova\.bg
|
(?:tvplay\.)?
skaties\.lv
|
(?:(?:tv3)?play\.)?
tv3\.(?:dk|ee|lt)
|
tv6play\.no
|
viafree\.(?:dk|no|se|fi)
)
/(?:
(?:
.+/
|
embed\?id=
)
(?P<stream_id>\d+)
)?
""", re.VERBOSE)
@classmethod
def can_handle_url(cls, url):
return cls._url_re.match(url)
def _get_swf_url(self):
res = self.session.http.get(self.url)
match = _swf_url_re.search(res.text)
if not match:
raise PluginError("Unable to find SWF URL in the HTML")
return match.group(1)
def _create_dynamic_streams(self, stream_type, parser, video):
try:
streams = parser(self.session, video[1])
return streams.items()
except IOError as err:
self.logger.error("Failed to extract {0} streams: {1}", stream_type, err)
def _create_rtmp_stream(self, video):
name, stream_url = video
params = {
"rtmp": stream_url,
"pageUrl": self.url,
"swfVfy": self._get_swf_url(),
}
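        # MP4 URLs need the tcUrl/playpath split expected by rtmpdump-style
        # players; other RTMP URLs are treated as live streams.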
if stream_url.endswith(".mp4"):
tcurl, playpath = rtmpparse(stream_url)
params["rtmp"] = tcurl
params["playpath"] = playpath
else:
params["live"] = True
return name, RTMPStream(self.session, params)
def _extract_streams(self, stream_id):
res = self.session.http.get(STREAM_API_URL.format(stream_id), raise_for_status=False)
stream_info = self.session.http.json(res, schema=_stream_schema)
if stream_info.get("msg"):
# error message
self.logger.error(stream_info.get("msg"))
raise NoStreamsError(self.url)
mapper = StreamMapper(lambda pattern, video: re.search(pattern, video[1]))
mapper.map(
r"/\w+\.m3u8",
self._create_dynamic_streams,
"HLS", HLSStream.parse_variant_playlist
)
mapper.map(
r"/\w+\.f4m",
self._create_dynamic_streams,
"HDS", HDSStream.parse_manifest
)
mapper.map(r"^rtmp://", self._create_rtmp_stream)
return mapper(stream_info.items())
def _get_stream_id(self, text):
"""Try to find a stream_id"""
m = self._image_re.search(text)
if m:
return m.group("stream_id")
def _get_iframe(self, text):
"""Fallback if no stream_id was found before"""
m = self._iframe_re.search(text)
if m:
return self.session.streams(m.group("url"))
def _get_streams(self):
match = self._url_re.match(self.url)
stream_id = match.group("stream_id")
if not stream_id:
text = self.session.http.get(self.url).text
stream_id = self._get_stream_id(text)
if not stream_id:
return self._get_iframe(text)
if stream_id:
return self._extract_streams(stream_id)
__plugin__ = Viasat
| gpl-2.0 |
jules185/IoT_Hackathon | .homeassistant/deps/psutil/tests/test_sunos.py | 6 | 1322 | #!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Sun OS specific tests."""
import os
import psutil
from psutil import SUNOS
from psutil.tests import run_test_module_by_name
from psutil.tests import sh
from psutil.tests import unittest
@unittest.skipUnless(SUNOS, "SUNOS only")
class SunOSSpecificTestCase(unittest.TestCase):
def test_swap_memory(self):
out = sh('env PATH=/usr/sbin:/sbin:%s swap -l' % os.environ['PATH'])
lines = out.strip().split('\n')[1:]
if not lines:
raise ValueError('no swap device(s) configured')
total = free = 0
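        # 'swap -l' reports sizes in 512-byte blocks, hence the * 512
        # conversion to bytes below.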
for line in lines:
line = line.split()
t, f = line[-2:]
total += int(int(t) * 512)
free += int(int(f) * 512)
used = total - free
psutil_swap = psutil.swap_memory()
self.assertEqual(psutil_swap.total, total)
self.assertEqual(psutil_swap.used, used)
self.assertEqual(psutil_swap.free, free)
def test_cpu_count(self):
out = sh("/usr/sbin/psrinfo")
self.assertEqual(psutil.cpu_count(), len(out.split('\n')))
if __name__ == '__main__':
run_test_module_by_name(__file__)
| mit |
gautamMalu/rootfs_xen_arndale | usr/lib/python2.7/opcode.py | 264 | 5474 |
"""
opcode module - potentially shared between dis and other modules which
operate on bytecodes (e.g. peephole optimizers).
"""
__all__ = ["cmp_op", "hasconst", "hasname", "hasjrel", "hasjabs",
"haslocal", "hascompare", "hasfree", "opname", "opmap",
"HAVE_ARGUMENT", "EXTENDED_ARG"]
cmp_op = ('<', '<=', '==', '!=', '>', '>=', 'in', 'not in', 'is',
'is not', 'exception match', 'BAD')
hasconst = []
hasname = []
hasjrel = []
hasjabs = []
haslocal = []
hascompare = []
hasfree = []
opmap = {}
opname = [''] * 256
for op in range(256): opname[op] = '<%r>' % (op,)
del op
def def_op(name, op):
opname[op] = name
opmap[name] = op
def name_op(name, op):
def_op(name, op)
hasname.append(op)
def jrel_op(name, op):
def_op(name, op)
hasjrel.append(op)
def jabs_op(name, op):
def_op(name, op)
hasjabs.append(op)
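# Each helper registers an opcode in both directions -- opname (number -> name)
# and opmap (name -> number); name_op/jrel_op/jabs_op additionally record it in
# the has* lists that dis uses to decode arguments. For example,
# def_op('POP_TOP', 1) sets opname[1] = 'POP_TOP' and opmap['POP_TOP'] = 1.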
# Instruction opcodes for compiled code
# Blank lines correspond to available opcodes
def_op('STOP_CODE', 0)
def_op('POP_TOP', 1)
def_op('ROT_TWO', 2)
def_op('ROT_THREE', 3)
def_op('DUP_TOP', 4)
def_op('ROT_FOUR', 5)
def_op('NOP', 9)
def_op('UNARY_POSITIVE', 10)
def_op('UNARY_NEGATIVE', 11)
def_op('UNARY_NOT', 12)
def_op('UNARY_CONVERT', 13)
def_op('UNARY_INVERT', 15)
def_op('BINARY_POWER', 19)
def_op('BINARY_MULTIPLY', 20)
def_op('BINARY_DIVIDE', 21)
def_op('BINARY_MODULO', 22)
def_op('BINARY_ADD', 23)
def_op('BINARY_SUBTRACT', 24)
def_op('BINARY_SUBSCR', 25)
def_op('BINARY_FLOOR_DIVIDE', 26)
def_op('BINARY_TRUE_DIVIDE', 27)
def_op('INPLACE_FLOOR_DIVIDE', 28)
def_op('INPLACE_TRUE_DIVIDE', 29)
def_op('SLICE+0', 30)
def_op('SLICE+1', 31)
def_op('SLICE+2', 32)
def_op('SLICE+3', 33)
def_op('STORE_SLICE+0', 40)
def_op('STORE_SLICE+1', 41)
def_op('STORE_SLICE+2', 42)
def_op('STORE_SLICE+3', 43)
def_op('DELETE_SLICE+0', 50)
def_op('DELETE_SLICE+1', 51)
def_op('DELETE_SLICE+2', 52)
def_op('DELETE_SLICE+3', 53)
def_op('STORE_MAP', 54)
def_op('INPLACE_ADD', 55)
def_op('INPLACE_SUBTRACT', 56)
def_op('INPLACE_MULTIPLY', 57)
def_op('INPLACE_DIVIDE', 58)
def_op('INPLACE_MODULO', 59)
def_op('STORE_SUBSCR', 60)
def_op('DELETE_SUBSCR', 61)
def_op('BINARY_LSHIFT', 62)
def_op('BINARY_RSHIFT', 63)
def_op('BINARY_AND', 64)
def_op('BINARY_XOR', 65)
def_op('BINARY_OR', 66)
def_op('INPLACE_POWER', 67)
def_op('GET_ITER', 68)
def_op('PRINT_EXPR', 70)
def_op('PRINT_ITEM', 71)
def_op('PRINT_NEWLINE', 72)
def_op('PRINT_ITEM_TO', 73)
def_op('PRINT_NEWLINE_TO', 74)
def_op('INPLACE_LSHIFT', 75)
def_op('INPLACE_RSHIFT', 76)
def_op('INPLACE_AND', 77)
def_op('INPLACE_XOR', 78)
def_op('INPLACE_OR', 79)
def_op('BREAK_LOOP', 80)
def_op('WITH_CLEANUP', 81)
def_op('LOAD_LOCALS', 82)
def_op('RETURN_VALUE', 83)
def_op('IMPORT_STAR', 84)
def_op('EXEC_STMT', 85)
def_op('YIELD_VALUE', 86)
def_op('POP_BLOCK', 87)
def_op('END_FINALLY', 88)
def_op('BUILD_CLASS', 89)
HAVE_ARGUMENT = 90 # Opcodes from here have an argument:
name_op('STORE_NAME', 90) # Index in name list
name_op('DELETE_NAME', 91) # ""
def_op('UNPACK_SEQUENCE', 92) # Number of tuple items
jrel_op('FOR_ITER', 93)
def_op('LIST_APPEND', 94)
name_op('STORE_ATTR', 95) # Index in name list
name_op('DELETE_ATTR', 96) # ""
name_op('STORE_GLOBAL', 97) # ""
name_op('DELETE_GLOBAL', 98) # ""
def_op('DUP_TOPX', 99) # number of items to duplicate
def_op('LOAD_CONST', 100) # Index in const list
hasconst.append(100)
name_op('LOAD_NAME', 101) # Index in name list
def_op('BUILD_TUPLE', 102) # Number of tuple items
def_op('BUILD_LIST', 103) # Number of list items
def_op('BUILD_SET', 104) # Number of set items
def_op('BUILD_MAP', 105) # Number of dict entries (upto 255)
name_op('LOAD_ATTR', 106) # Index in name list
def_op('COMPARE_OP', 107) # Comparison operator
hascompare.append(107)
name_op('IMPORT_NAME', 108) # Index in name list
name_op('IMPORT_FROM', 109) # Index in name list
jrel_op('JUMP_FORWARD', 110) # Number of bytes to skip
jabs_op('JUMP_IF_FALSE_OR_POP', 111) # Target byte offset from beginning of code
jabs_op('JUMP_IF_TRUE_OR_POP', 112) # ""
jabs_op('JUMP_ABSOLUTE', 113) # ""
jabs_op('POP_JUMP_IF_FALSE', 114) # ""
jabs_op('POP_JUMP_IF_TRUE', 115) # ""
name_op('LOAD_GLOBAL', 116) # Index in name list
jabs_op('CONTINUE_LOOP', 119) # Target address
jrel_op('SETUP_LOOP', 120) # Distance to target address
jrel_op('SETUP_EXCEPT', 121) # ""
jrel_op('SETUP_FINALLY', 122) # ""
def_op('LOAD_FAST', 124) # Local variable number
haslocal.append(124)
def_op('STORE_FAST', 125) # Local variable number
haslocal.append(125)
def_op('DELETE_FAST', 126) # Local variable number
haslocal.append(126)
def_op('RAISE_VARARGS', 130) # Number of raise arguments (1, 2, or 3)
def_op('CALL_FUNCTION', 131) # #args + (#kwargs << 8)
def_op('MAKE_FUNCTION', 132) # Number of args with default values
def_op('BUILD_SLICE', 133) # Number of items
def_op('MAKE_CLOSURE', 134)
def_op('LOAD_CLOSURE', 135)
hasfree.append(135)
def_op('LOAD_DEREF', 136)
hasfree.append(136)
def_op('STORE_DEREF', 137)
hasfree.append(137)
def_op('CALL_FUNCTION_VAR', 140) # #args + (#kwargs << 8)
def_op('CALL_FUNCTION_KW', 141) # #args + (#kwargs << 8)
def_op('CALL_FUNCTION_VAR_KW', 142) # #args + (#kwargs << 8)
jrel_op('SETUP_WITH', 143)
def_op('EXTENDED_ARG', 145)
EXTENDED_ARG = 145
def_op('SET_ADD', 146)
def_op('MAP_ADD', 147)
del def_op, name_op, jrel_op, jabs_op
| gpl-2.0 |
DiamondLightSource/diffcalc | test/diffcalc/gdasupport/minigda/test_command.py | 1 | 5417 | ###
# Copyright 2008-2011 Diamond Light Source Ltd.
# This file is part of Diffcalc.
#
# Diffcalc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Diffcalc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Diffcalc. If not, see <http://www.gnu.org/licenses/>.
###
import unittest
import diffcalc.gdasupport.minigda.command
from diffcalc.gdasupport.minigda.command import Pos, Scan, ScanDataPrinter
from diffcalc.gdasupport.minigda.scannable import \
MultiInputExtraFieldsDummyScannable, SingleFieldDummyScannable
class BadSingleFieldDummyScannable(SingleFieldDummyScannable):
def getPosition(self):
raise Exception("Problem")
class NoneReturningSingleFieldDummyScannable(SingleFieldDummyScannable):
def getPosition(self):
return None
class TestPos(object):
def setup_method(self):
self.dummyMainNamespace = namespace = {}
namespace['notAScannable'] = 3.124
namespace['scnA'] = SingleFieldDummyScannable('scnA')
namespace['scnB'] = SingleFieldDummyScannable('scnB')
namespace['scnC'] = SingleFieldDummyScannable('scnC')
namespace['scnD'] = SingleFieldDummyScannable('scnD')
namespace['scnNone'] = \
NoneReturningSingleFieldDummyScannable('scnNone')
namespace['scnBad'] = BadSingleFieldDummyScannable('scnBad')
diffcalc.gdasupport.minigda.command.ROOT_NAMESPACE_DICT = \
self.dummyMainNamespace
self.pos = Pos()
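        # Pos resolves scannables through the module-level ROOT_NAMESPACE_DICT
        # patched above, mimicking the GDA root namespace.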
def testPosReturningReportWithRead(self):
scnA = self.dummyMainNamespace['scnA']
assert self.pos.posReturningReport(scnA) == 'scnA: 0.0000'
def testPosReturningReportWithMove(self):
scnA = self.dummyMainNamespace['scnA']
assert self.pos.posReturningReport(scnA, 1.123) == 'scnA: 1.1230'
def test__call__(self):
scnA = self.dummyMainNamespace['scnA']
self.pos.__call__(scnA)
self.pos.__call__(scnA, 4.321)
print "*"
self.pos.__call__()
print "*"
def testPosReturningReportWithMultiFieldScannables(self):
scn = MultiInputExtraFieldsDummyScannable('mie', ['i1', 'i2'], ['e1'])
assert (self.pos.posReturningReport(scn)
== 'mie: i1: 0.0000 i2: 0.0000 e1: 100.0000 ')
def testPosReturningReportWithBadScannable(self):
scnBad = self.dummyMainNamespace['scnBad']
assert self.pos.posReturningReport(scnBad) == "scnBad: Error: Problem"
assert (self.pos.posReturningReport(scnBad, 4.321)
== "scnBad: Error: Problem")
def testPosReturningReportWithNoneReturningScannable(self):
scnNone = self.dummyMainNamespace['scnNone']
assert self.pos.posReturningReport(scnNone) == "scnNone: ---"
assert self.pos.posReturningReport(scnNone, 4.321) == "scnNone: ---"
class TestScan(object):
def setup_method(self):
self.scan = Scan([ScanDataPrinter()])
def test__parseScanArgsIntoScannableArgGroups(self):
scnA = SingleFieldDummyScannable('scnA')
scnB = SingleFieldDummyScannable('scnB')
scnC = SingleFieldDummyScannable('scnC')
scnD = SingleFieldDummyScannable('scnD')
scanargs = (scnA, 1, 2, 3, scnB, [4, 5, 6], scnC, scnD, 1.123456)
r = self.scan._parseScanArgsIntoScannableArgGroups(scanargs)
result = [r[0].scannable, r[0].args, r[1].scannable, r[1].args,
r[2].scannable, r[2].args, r[3].scannable, r[3].args]
desired = [scnA, [1, 2, 3], scnB, [[4, 5, 6], ], scnC, list(), scnD,
[1.123456]]
assert result == desired
def test__reorderGroupsAccordingToLevel(self):
scn4 = SingleFieldDummyScannable('scn4')
scn4.setLevel(4)
scn5a = SingleFieldDummyScannable('scn5a')
scn5a.setLevel(5)
scn5b = SingleFieldDummyScannable('scn5b')
scn5b.setLevel(5)
scn6 = SingleFieldDummyScannable('scn6')
scn6.setLevel(6)
def t(scanargs):
groups = self.scan._parseScanArgsIntoScannableArgGroups(scanargs)
r = self.scan._reorderInnerGroupsAccordingToLevel(groups)
return [r[0].scannable, r[1].scannable, r[2].scannable,
r[3].scannable]
assert (t((scn5a, 1, 2, 3, scn6, 1, scn5b, scn4))
== [scn5a, scn4, scn5b, scn6])
assert (t((scn5a, 1, 3, scn6, 1, scn5b, scn4))
== [scn4, scn5a, scn5b, scn6])
def test__Frange(self):
assert self.scan._frange(1, 1.3, .1) == [1.0, 1.1, 1.2, 1.3]
def test__Call__(self):
scn4 = SingleFieldDummyScannable('scn4')
scn4.setLevel(4)
scn5a = SingleFieldDummyScannable('scn5a')
scn5a.setLevel(5)
scn5b = SingleFieldDummyScannable('scn5b')
scn5b.setLevel(5)
scn6 = SingleFieldDummyScannable('scn6')
scn6.setLevel(6)
self.scan.__call__(scn5a, 1, 3, 1, scn6, 1, scn5b, scn4)
| gpl-3.0 |
RichardLitt/wyrd-django-dev | tests/test_sqlite.py | 64 | 1142 | # This is an example test settings file for use with the Django test suite.
#
# The 'sqlite3' backend requires only the ENGINE setting (an in-
# memory database will be used). All other backends will require a
# NAME and potentially authentication information. See the
# following section in the docs for more information:
#
# https://docs.djangoproject.com/en/dev/internals/contributing/writing-code/unit-tests/
#
# The different databases that Django supports behave differently in certain
# situations, so it is recommended to run the test suite against as many
# database backends as possible. You may want to create a separate settings
# file for each of the backends you test against.
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3'
},
'other': {
'ENGINE': 'django.db.backends.sqlite3',
}
}
SECRET_KEY = "django_tests_secret_key"
# To speed up tests under SQLite we use the MD5 hasher as the default one.
# This should not be needed under other databases, as the relative speedup
# is only marginal there.
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher',
)
| bsd-3-clause |
KaiRo-at/socorro | socorro/unittest/external/es/test_index_creator.py | 5 | 1538 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from nose.tools import ok_
from socorro.external.es.index_creator import IndexCreator
from socorro.unittest.external.es.base import ElasticsearchTestCase
# Uncomment these lines to decrease verbosity of the elasticsearch library
# while running unit tests.
# import logging
# logging.getLogger('elasticsearch').setLevel(logging.ERROR)
# logging.getLogger('requests').setLevel(logging.ERROR)
class IntegrationTestIndexCreator(ElasticsearchTestCase):
def __init__(self, *args, **kwargs):
super(IntegrationTestIndexCreator, self).__init__(*args, **kwargs)
self.config = self.get_tuned_config(IndexCreator)
def test_create_index(self):
index_creator = IndexCreator(config=self.config)
index_creator.create_index(
self.config.elasticsearch.elasticsearch_index,
{'foo': 'bar'}
)
ok_(
self.index_client.exists(
self.config.elasticsearch.elasticsearch_index
)
)
def test_create_socorro_index(self):
index_creator = IndexCreator(config=self.config)
index_creator.create_socorro_index(
self.config.elasticsearch.elasticsearch_index
)
ok_(
self.index_client.exists(
self.config.elasticsearch.elasticsearch_index
)
)
| mpl-2.0 |
GiantSteps/essentia | test/src/unittest/spectral/test_spectralcontrast.py | 10 | 4457 | #!/usr/bin/env python
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
class TestSpectralContrast(TestCase):
def testRegression(self):
# Simple regression test, comparing to reference values
audio = MonoLoader(filename = join(testdata.audio_dir, 'recorded/musicbox.wav'),
sampleRate = 44100)()
fft = Spectrum()
window = Windowing(type = 'hamming')
SC = SpectralContrast(sampleRate = 44100)
expected = 0
sc = []
valleys = []
for frame in FrameGenerator(audio, frameSize = 2048, hopSize = 512):
result = SC(fft(window(frame)))
self.assert_(not any(numpy.isnan(result[0])))
self.assert_(not any(numpy.isinf(result[1])))
sc += [result[0]]
valleys += [result[1]]
self.assertAlmostEqual(numpy.mean(sc), -0.604606057431, 1e-5)
self.assertAlmostEqual(numpy.mean(valleys), -8.55062127501, 1e-5)
def testZero(self):
SC = SpectralContrast(sampleRate = 44100)
sc, valleys = SC(zeros(1025))
self.assertAlmostEqual(numpy.mean(sc), -1)
self.assertAlmostEqual(numpy.mean(valleys), numpy.log(1e-30))
def testOnes(self):
SC = SpectralContrast(sampleRate = 44100)
sc, valleys = SC(ones(1025))
self.assertAlmostEqual(numpy.mean(sc), -1)
self.assertAlmostEqual(numpy.mean(valleys), 0)
def testConstant(self):
SC = SpectralContrast(sampleRate = 44100)
sc, valleys = SC([0.5]*1025)
self.assertAlmostEqual(numpy.mean(sc), -1)
self.assertAlmostEqual(numpy.mean(valleys),-0.6931471825, 1e-7)
def testCompare(self):
spec0 = [1]*1025
spec1 = [1]*1015 + [0]*10
spec2 = [1]*10 + [0]*1015
sr = 44100
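        # spec2 packs all energy into the lowest 10 bins, spec1 only removes
        # the top 10 bins, and spec0 is flat, so the expected contrast ranking
        # is sc0 < sc1 < sc2.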
SC = SpectralContrast(sampleRate = sr, highFrequencyBound = sr/2)
sc0 = SC(spec0)
sc1 = SC(spec1)
sc2 = SC(spec2)
self.assertTrue(numpy.mean(sc1[0]) < numpy.mean(sc2[0]))
self.assertTrue(numpy.mean(sc0[0]) < numpy.mean(sc2[0]))
self.assertTrue(numpy.mean(sc0[0]) < numpy.mean(sc1[0]))
def testInvalidParam(self):
self.assertConfigureFails(SpectralContrast(), { 'frameSize': 0 })
self.assertConfigureFails(SpectralContrast(), { 'frameSize': 1 })
self.assertConfigureFails(SpectralContrast(), { 'sampleRate': 0 })
self.assertConfigureFails(SpectralContrast(), { 'numberBands': 0 })
self.assertConfigureFails(SpectralContrast(), { 'lowFrequencyBound': -1 })
self.assertConfigureFails(SpectralContrast(), { 'highFrequencyBound': 40000 })
self.assertConfigureFails(SpectralContrast(), { 'neighbourRatio': 1.5 })
self.assertConfigureFails(SpectralContrast(), { 'staticDistribution': 1.5 })
# lower bound cannot be larger than higher band:
self.assertConfigureFails(SpectralContrast(), { 'lowFrequencyBound': 11000,
'highFrequencyBound': 5000 })
def testEmpty(self):
SC = SpectralContrast(sampleRate = 44100)
self.assertComputeFails(SC, [])
def testOneElement(self):
# input spectrum must be 0.5*framesize
SC = SpectralContrast(sampleRate = 44100)
self.assertComputeFails(SC, [1])
def testSpectrumSizeSmallerThanNumberOfBands(self):
SC = SpectralContrast(sampleRate = 44100, frameSize = 4)
sc = SC([1,1,1])
self.assertAlmostEquals(numpy.mean(sc[0]), -2.7182817459)
self.assertAlmostEquals(numpy.mean(sc[1]), 0)
suite = allTests(TestSpectralContrast)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
| agpl-3.0 |
solin319/incubator-mxnet | example/python-howto/debug_conv.py | 44 | 1319 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
data_shape = (1,3,5,5)
class SimpleData(object):
def __init__(self, data):
self.data = data
data = mx.sym.Variable('data')
conv = mx.sym.Convolution(data=data, kernel=(3,3), pad=(1,1), stride=(1,1), num_filter=1)
mon = mx.mon.Monitor(1)
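# A Monitor lets intermediate NDArrays be inspected; interval=1 means it is
# eligible to fire on every batch. It must be installed on the executor group
# before forward() runs, as done below.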
mod = mx.mod.Module(conv)
mod.bind(data_shapes=[('data', data_shape)])
mod._exec_group.install_monitor(mon)
mod.init_params()
input_data = mx.nd.ones(data_shape)
mod.forward(data_batch=SimpleData([input_data]))
res = mod.get_outputs()[0].asnumpy()
print(res)
| apache-2.0 |
bikashgupta11/javarobot | src/main/resources/jython/Lib/unittest/test/test_case.py | 89 | 43816 | import difflib
import pprint
import pickle
import re
import sys
from copy import deepcopy
from test import test_support
import unittest
from .support import (
TestEquality, TestHashing, LoggingResult, ResultWithNoStartTestRunStopTestRun
)
class Test(object):
"Keep these TestCase classes out of the main namespace"
class Foo(unittest.TestCase):
def runTest(self): pass
def test1(self): pass
class Bar(Foo):
def test2(self): pass
class LoggingTestCase(unittest.TestCase):
"""A test case which logs its calls."""
def __init__(self, events):
super(Test.LoggingTestCase, self).__init__('test')
self.events = events
def setUp(self):
self.events.append('setUp')
def test(self):
self.events.append('test')
def tearDown(self):
self.events.append('tearDown')
class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
### Set up attributes used by inherited tests
################################################################
# Used by TestHashing.test_hash and TestEquality.test_eq
eq_pairs = [(Test.Foo('test1'), Test.Foo('test1'))]
# Used by TestEquality.test_ne
ne_pairs = [(Test.Foo('test1'), Test.Foo('runTest'))
,(Test.Foo('test1'), Test.Bar('test1'))
,(Test.Foo('test1'), Test.Bar('test2'))]
################################################################
### /Set up attributes used by inherited tests
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
# ...
# "methodName defaults to "runTest"."
#
# Make sure it really is optional, and that it defaults to the proper
# thing.
def test_init__no_test_name(self):
class Test(unittest.TestCase):
def runTest(self): raise TypeError()
def test(self): pass
self.assertEqual(Test().id()[-13:], '.Test.runTest')
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__valid(self):
class Test(unittest.TestCase):
def runTest(self): raise TypeError()
def test(self): pass
self.assertEqual(Test('test').id()[-10:], '.Test.test')
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__invalid(self):
class Test(unittest.TestCase):
def runTest(self): raise TypeError()
def test(self): pass
try:
Test('testfoo')
except ValueError:
pass
else:
self.fail("Failed to raise ValueError")
# "Return the number of tests represented by the this test object. For
# TestCase instances, this will always be 1"
def test_countTestCases(self):
class Foo(unittest.TestCase):
def test(self): pass
self.assertEqual(Foo('test').countTestCases(), 1)
# "Return the default type of test result object to be used to run this
# test. For TestCase instances, this will always be
# unittest.TestResult; subclasses of TestCase should
# override this as necessary."
def test_defaultTestResult(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
result = Foo().defaultTestResult()
self.assertEqual(type(result), unittest.TestResult)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if setUp() raises
# an exception.
def test_run_call_order__error_in_setUp(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def setUp(self):
super(Foo, self).setUp()
raise RuntimeError('raised by Foo.setUp')
Foo(events).run(result)
expected = ['startTest', 'setUp', 'addError', 'stopTest']
self.assertEqual(events, expected)
# "With a temporary result stopTestRun is called when setUp errors.
def test_run_call_order__error_in_setUp_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def setUp(self):
super(Foo, self).setUp()
raise RuntimeError('raised by Foo.setUp')
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'addError',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test raises
# an error (as opposed to a failure).
def test_run_call_order__error_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'addError', 'tearDown',
'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
# "With a default result, an error in the test still results in stopTestRun
# being called."
def test_run_call_order__error_in_test_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test', 'addError',
'tearDown', 'stopTest', 'stopTestRun']
Foo(events).run()
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test signals
# a failure (as opposed to an error).
def test_run_call_order__failure_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'addFailure', 'tearDown',
'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
# "When a test fails with a default result stopTestRun is still called."
def test_run_call_order__failure_in_test_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test', 'addFailure',
'tearDown', 'stopTest', 'stopTestRun']
events = []
Foo(events).run()
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if tearDown() raises
# an exception.
def test_run_call_order__error_in_tearDown(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
Foo(events).run(result)
expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError',
'stopTest']
self.assertEqual(events, expected)
# "When tearDown errors with a default result stopTestRun is still called."
def test_run_call_order__error_in_tearDown_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
events = []
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'test', 'tearDown',
'addError', 'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "TestCase.run() still works when the defaultTestResult is a TestResult
# that does not support startTestRun and stopTestRun.
def test_run_call_order_default_result(self):
class Foo(unittest.TestCase):
def defaultTestResult(self):
return ResultWithNoStartTestRunStopTestRun()
def test(self):
pass
Foo('test').run()
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework. The initial value of this
# attribute is AssertionError"
def test_failureException__default(self):
class Foo(unittest.TestCase):
def test(self):
pass
self.assertTrue(Foo('test').failureException is AssertionError)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__explicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
raise RuntimeError()
failureException = RuntimeError
self.assertTrue(Foo('test').failureException is RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__implicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
self.fail("foo")
failureException = RuntimeError
self.assertTrue(Foo('test').failureException is RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
# "The default implementation does nothing."
def test_setUp(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().setUp()
# "The default implementation does nothing."
def test_tearDown(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().tearDown()
# "Return a string identifying the specific test case."
#
# Because of the vague nature of the docs, I'm not going to lock this
# test down too much. Really all that can be asserted is that the id()
# will be a string (either 8-byte or unicode -- again, because the docs
# just say "string")
def test_id(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
self.assertIsInstance(Foo().id(), basestring)
# "If result is omitted or None, a temporary result object is created
# and used, but is not made available to the caller. As TestCase owns the
# temporary result startTestRun and stopTestRun are called.
def test_run__uses_defaultTestResult(self):
events = []
class Foo(unittest.TestCase):
def test(self):
events.append('test')
def defaultTestResult(self):
return LoggingResult(events)
# Make run() find a result object on its own
Foo('test').run()
expected = ['startTestRun', 'startTest', 'test', 'addSuccess',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
def testShortDescriptionWithoutDocstring(self):
self.assertIsNone(self.shortDescription())
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testShortDescriptionWithOneLineDocstring(self):
"""Tests shortDescription() for a method with a docstring."""
self.assertEqual(
self.shortDescription(),
'Tests shortDescription() for a method with a docstring.')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testShortDescriptionWithMultiLineDocstring(self):
"""Tests shortDescription() for a method with a longer docstring.
This method ensures that only the first line of a docstring is
returned used in the short description, no matter how long the
whole thing is.
"""
self.assertEqual(
self.shortDescription(),
'Tests shortDescription() for a method with a longer '
'docstring.')
def testAddTypeEqualityFunc(self):
class SadSnake(object):
"""Dummy class for test_addTypeEqualityFunc."""
s1, s2 = SadSnake(), SadSnake()
self.assertNotEqual(s1, s2)
def AllSnakesCreatedEqual(a, b, msg=None):
return type(a) is type(b) is SadSnake
self.addTypeEqualityFunc(SadSnake, AllSnakesCreatedEqual)
self.assertEqual(s1, s2)
# No, this doesn't clean up and remove the SadSnake equality func
# from this TestCase instance, but since it's a local, nothing else
# will ever notice that.
def testAssertIs(self):
thing = object()
self.assertIs(thing, thing)
self.assertRaises(self.failureException, self.assertIs, thing, object())
def testAssertIsNot(self):
thing = object()
self.assertIsNot(thing, object())
self.assertRaises(self.failureException, self.assertIsNot, thing, thing)
def testAssertIsInstance(self):
thing = []
self.assertIsInstance(thing, list)
self.assertRaises(self.failureException, self.assertIsInstance,
thing, dict)
def testAssertNotIsInstance(self):
thing = []
self.assertNotIsInstance(thing, dict)
self.assertRaises(self.failureException, self.assertNotIsInstance,
thing, list)
def testAssertIn(self):
animals = {'monkey': 'banana', 'cow': 'grass', 'seal': 'fish'}
self.assertIn('a', 'abc')
self.assertIn(2, [1, 2, 3])
self.assertIn('monkey', animals)
self.assertNotIn('d', 'abc')
self.assertNotIn(0, [1, 2, 3])
self.assertNotIn('otter', animals)
self.assertRaises(self.failureException, self.assertIn, 'x', 'abc')
self.assertRaises(self.failureException, self.assertIn, 4, [1, 2, 3])
self.assertRaises(self.failureException, self.assertIn, 'elephant',
animals)
self.assertRaises(self.failureException, self.assertNotIn, 'c', 'abc')
self.assertRaises(self.failureException, self.assertNotIn, 1, [1, 2, 3])
self.assertRaises(self.failureException, self.assertNotIn, 'cow',
animals)
def testAssertDictContainsSubset(self):
self.assertDictContainsSubset({}, {})
self.assertDictContainsSubset({}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1, 'b': 2})
self.assertDictContainsSubset({'a': 1, 'b': 2}, {'a': 1, 'b': 2})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({1: "one"}, {})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 2}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'c': 1}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1})
with test_support.check_warnings(("", UnicodeWarning)):
one = ''.join(chr(i) for i in range(255))
# this used to cause a UnicodeDecodeError constructing the failure msg
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'foo': one}, {'foo': u'\uFFFD'})
def testAssertEqual(self):
equal_pairs = [
((), ()),
({}, {}),
([], []),
(set(), set()),
(frozenset(), frozenset())]
for a, b in equal_pairs:
# This mess of try excepts is to test the assertEqual behavior
# itself.
try:
self.assertEqual(a, b)
except self.failureException:
self.fail('assertEqual(%r, %r) failed' % (a, b))
try:
self.assertEqual(a, b, msg='foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with msg= failed' % (a, b))
try:
self.assertEqual(a, b, 'foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with third parameter failed' %
(a, b))
unequal_pairs = [
((), []),
({}, set()),
(set([4,1]), frozenset([4,2])),
(frozenset([4,5]), set([2,3])),
(set([3,4]), set([5,4]))]
for a, b in unequal_pairs:
self.assertRaises(self.failureException, self.assertEqual, a, b)
self.assertRaises(self.failureException, self.assertEqual, a, b,
'foo')
self.assertRaises(self.failureException, self.assertEqual, a, b,
msg='foo')
def testEquality(self):
self.assertListEqual([], [])
self.assertTupleEqual((), ())
self.assertSequenceEqual([], ())
a = [0, 'a', []]
b = []
self.assertRaises(unittest.TestCase.failureException,
self.assertListEqual, a, b)
self.assertRaises(unittest.TestCase.failureException,
self.assertListEqual, tuple(a), tuple(b))
self.assertRaises(unittest.TestCase.failureException,
self.assertSequenceEqual, a, tuple(b))
b.extend(a)
self.assertListEqual(a, b)
self.assertTupleEqual(tuple(a), tuple(b))
self.assertSequenceEqual(a, tuple(b))
self.assertSequenceEqual(tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual,
a, tuple(b))
self.assertRaises(self.failureException, self.assertTupleEqual,
tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual, None, b)
self.assertRaises(self.failureException, self.assertTupleEqual, None,
tuple(b))
self.assertRaises(self.failureException, self.assertSequenceEqual,
None, tuple(b))
self.assertRaises(self.failureException, self.assertListEqual, 1, 1)
self.assertRaises(self.failureException, self.assertTupleEqual, 1, 1)
self.assertRaises(self.failureException, self.assertSequenceEqual,
1, 1)
self.assertDictEqual({}, {})
c = { 'x': 1 }
d = {}
self.assertRaises(unittest.TestCase.failureException,
self.assertDictEqual, c, d)
d.update(c)
self.assertDictEqual(c, d)
d['x'] = 0
self.assertRaises(unittest.TestCase.failureException,
self.assertDictEqual, c, d, 'These are unequal')
self.assertRaises(self.failureException, self.assertDictEqual, None, d)
self.assertRaises(self.failureException, self.assertDictEqual, [], d)
self.assertRaises(self.failureException, self.assertDictEqual, 1, 1)
def testAssertSequenceEqualMaxDiff(self):
self.assertEqual(self.maxDiff, 80*8)
seq1 = 'a' + 'x' * 80**2
seq2 = 'b' + 'x' * 80**2
diff = '\n'.join(difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
# the +1 is the leading \n added by assertSequenceEqual
omitted = unittest.case.DIFF_OMITTED % (len(diff) + 1,)
self.maxDiff = len(diff)//2
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertTrue(len(msg) < len(diff))
self.assertIn(omitted, msg)
self.maxDiff = len(diff) * 2
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertTrue(len(msg) > len(diff))
self.assertNotIn(omitted, msg)
self.maxDiff = None
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertTrue(len(msg) > len(diff))
self.assertNotIn(omitted, msg)
def testTruncateMessage(self):
self.maxDiff = 1
message = self._truncateMessage('foo', 'bar')
omitted = unittest.case.DIFF_OMITTED % len('bar')
self.assertEqual(message, 'foo' + omitted)
self.maxDiff = None
message = self._truncateMessage('foo', 'bar')
self.assertEqual(message, 'foobar')
self.maxDiff = 4
message = self._truncateMessage('foo', 'bar')
self.assertEqual(message, 'foobar')
def testAssertDictEqualTruncates(self):
test = unittest.TestCase('assertEqual')
def truncate(msg, diff):
return 'foo'
test._truncateMessage = truncate
try:
test.assertDictEqual({}, {1: 0})
except self.failureException as e:
self.assertEqual(str(e), 'foo')
else:
self.fail('assertDictEqual did not fail')
def testAssertMultiLineEqualTruncates(self):
test = unittest.TestCase('assertEqual')
def truncate(msg, diff):
return 'foo'
test._truncateMessage = truncate
try:
test.assertMultiLineEqual('foo', 'bar')
except self.failureException as e:
self.assertEqual(str(e), 'foo')
else:
self.fail('assertMultiLineEqual did not fail')
def testAssertEqual_diffThreshold(self):
# check threshold value
self.assertEqual(self._diffThreshold, 2**16)
# disable maxDiff to get diff markers
self.maxDiff = None
# set a lower threshold value and add a cleanup to restore it
old_threshold = self._diffThreshold
self._diffThreshold = 2**8
self.addCleanup(lambda: setattr(self, '_diffThreshold', old_threshold))
# under the threshold: diff marker (^) in error message
s = u'x' * (2**7)
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s + 'a', s + 'b')
self.assertIn('^', str(cm.exception))
self.assertEqual(s + 'a', s + 'a')
# over the threshold: diff not used and marker (^) not in error message
s = u'x' * (2**9)
# if the path that uses difflib is taken, _truncateMessage will be
# called -- replace it with explodingTruncation to verify that this
# doesn't happen
def explodingTruncation(message, diff):
raise SystemError('this should not be raised')
old_truncate = self._truncateMessage
self._truncateMessage = explodingTruncation
self.addCleanup(lambda: setattr(self, '_truncateMessage', old_truncate))
s1, s2 = s + 'a', s + 'b'
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
self.assertNotIn('^', str(cm.exception))
self.assertEqual(str(cm.exception), '%r != %r' % (s1, s2))
self.assertEqual(s + 'a', s + 'a')
def testAssertItemsEqual(self):
a = object()
self.assertItemsEqual([1, 2, 3], [3, 2, 1])
self.assertItemsEqual(['foo', 'bar', 'baz'], ['bar', 'baz', 'foo'])
self.assertItemsEqual([a, a, 2, 2, 3], (a, 2, 3, a, 2))
self.assertItemsEqual([1, "2", "a", "a"], ["a", "2", True, "a"])
self.assertRaises(self.failureException, self.assertItemsEqual,
[1, 2] + [3] * 100, [1] * 100 + [2, 3])
self.assertRaises(self.failureException, self.assertItemsEqual,
[1, "2", "a", "a"], ["a", "2", True, 1])
self.assertRaises(self.failureException, self.assertItemsEqual,
[10], [10, 11])
self.assertRaises(self.failureException, self.assertItemsEqual,
[10, 11], [10])
self.assertRaises(self.failureException, self.assertItemsEqual,
[10, 11, 10], [10, 11])
# Test that sequences of unhashable objects can be tested for sameness:
self.assertItemsEqual([[1, 2], [3, 4], 0], [False, [3, 4], [1, 2]])
# Test that iterator of unhashable objects can be tested for sameness:
self.assertItemsEqual(iter([1, 2, [], 3, 4]),
iter([1, 2, [], 3, 4]))
# hashable types, but not orderable
self.assertRaises(self.failureException, self.assertItemsEqual,
[], [divmod, 'x', 1, 5j, 2j, frozenset()])
# comparing dicts
self.assertItemsEqual([{'a': 1}, {'b': 2}], [{'b': 2}, {'a': 1}])
# comparing heterogeneous non-hashable sequences
self.assertItemsEqual([1, 'x', divmod, []], [divmod, [], 'x', 1])
self.assertRaises(self.failureException, self.assertItemsEqual,
[], [divmod, [], 'x', 1, 5j, 2j, set()])
self.assertRaises(self.failureException, self.assertItemsEqual,
[[1]], [[2]])
# Same elements, but not same sequence length
self.assertRaises(self.failureException, self.assertItemsEqual,
[1, 1, 2], [2, 1])
self.assertRaises(self.failureException, self.assertItemsEqual,
[1, 1, "2", "a", "a"], ["2", "2", True, "a"])
self.assertRaises(self.failureException, self.assertItemsEqual,
[1, {'b': 2}, None, True], [{'b': 2}, True, None])
# Same elements which don't reliably compare, in
# different order, see issue 10242
a = [{2,4}, {1,2}]
b = a[::-1]
self.assertItemsEqual(a, b)
# test utility functions supporting assertItemsEqual()
diffs = set(unittest.util._count_diff_all_purpose('aaabccd', 'abbbcce'))
expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')}
self.assertEqual(diffs, expected)
diffs = unittest.util._count_diff_all_purpose([[]], [])
self.assertEqual(diffs, [(1, 0, [])])
diffs = set(unittest.util._count_diff_hashable('aaabccd', 'abbbcce'))
expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')}
self.assertEqual(diffs, expected)
def testAssertSetEqual(self):
set1 = set()
set2 = set()
self.assertSetEqual(set1, set2)
self.assertRaises(self.failureException, self.assertSetEqual, None, set2)
self.assertRaises(self.failureException, self.assertSetEqual, [], set2)
self.assertRaises(self.failureException, self.assertSetEqual, set1, None)
self.assertRaises(self.failureException, self.assertSetEqual, set1, [])
set1 = set(['a'])
set2 = set()
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a'])
set2 = set(['a'])
self.assertSetEqual(set1, set2)
set1 = set(['a'])
set2 = set(['a', 'b'])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a'])
set2 = frozenset(['a', 'b'])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a', 'b'])
set2 = frozenset(['a', 'b'])
self.assertSetEqual(set1, set2)
set1 = set()
set2 = "foo"
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
self.assertRaises(self.failureException, self.assertSetEqual, set2, set1)
# make sure any string formatting is tuple-safe
set1 = set([(0, 1), (2, 3)])
set2 = set([(4, 5)])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
def testInequality(self):
# Try ints
self.assertGreater(2, 1)
self.assertGreaterEqual(2, 1)
self.assertGreaterEqual(1, 1)
self.assertLess(1, 2)
self.assertLessEqual(1, 2)
self.assertLessEqual(1, 1)
self.assertRaises(self.failureException, self.assertGreater, 1, 2)
self.assertRaises(self.failureException, self.assertGreater, 1, 1)
self.assertRaises(self.failureException, self.assertGreaterEqual, 1, 2)
self.assertRaises(self.failureException, self.assertLess, 2, 1)
self.assertRaises(self.failureException, self.assertLess, 1, 1)
self.assertRaises(self.failureException, self.assertLessEqual, 2, 1)
# Try Floats
self.assertGreater(1.1, 1.0)
self.assertGreaterEqual(1.1, 1.0)
self.assertGreaterEqual(1.0, 1.0)
self.assertLess(1.0, 1.1)
self.assertLessEqual(1.0, 1.1)
self.assertLessEqual(1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.1)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreaterEqual, 1.0, 1.1)
self.assertRaises(self.failureException, self.assertLess, 1.1, 1.0)
self.assertRaises(self.failureException, self.assertLess, 1.0, 1.0)
self.assertRaises(self.failureException, self.assertLessEqual, 1.1, 1.0)
# Try Strings
self.assertGreater('bug', 'ant')
self.assertGreaterEqual('bug', 'ant')
self.assertGreaterEqual('ant', 'ant')
self.assertLess('ant', 'bug')
self.assertLessEqual('ant', 'bug')
self.assertLessEqual('ant', 'ant')
self.assertRaises(self.failureException, self.assertGreater, 'ant', 'bug')
self.assertRaises(self.failureException, self.assertGreater, 'ant', 'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, 'ant', 'bug')
self.assertRaises(self.failureException, self.assertLess, 'bug', 'ant')
self.assertRaises(self.failureException, self.assertLess, 'ant', 'ant')
self.assertRaises(self.failureException, self.assertLessEqual, 'bug', 'ant')
# Try Unicode
self.assertGreater(u'bug', u'ant')
self.assertGreaterEqual(u'bug', u'ant')
self.assertGreaterEqual(u'ant', u'ant')
self.assertLess(u'ant', u'bug')
self.assertLessEqual(u'ant', u'bug')
self.assertLessEqual(u'ant', u'ant')
self.assertRaises(self.failureException, self.assertGreater, u'ant', u'bug')
self.assertRaises(self.failureException, self.assertGreater, u'ant', u'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, u'ant',
u'bug')
self.assertRaises(self.failureException, self.assertLess, u'bug', u'ant')
self.assertRaises(self.failureException, self.assertLess, u'ant', u'ant')
self.assertRaises(self.failureException, self.assertLessEqual, u'bug', u'ant')
# Try Mixed String/Unicode
self.assertGreater('bug', u'ant')
self.assertGreater(u'bug', 'ant')
self.assertGreaterEqual('bug', u'ant')
self.assertGreaterEqual(u'bug', 'ant')
self.assertGreaterEqual('ant', u'ant')
self.assertGreaterEqual(u'ant', 'ant')
self.assertLess('ant', u'bug')
self.assertLess(u'ant', 'bug')
self.assertLessEqual('ant', u'bug')
self.assertLessEqual(u'ant', 'bug')
self.assertLessEqual('ant', u'ant')
self.assertLessEqual(u'ant', 'ant')
self.assertRaises(self.failureException, self.assertGreater, 'ant', u'bug')
self.assertRaises(self.failureException, self.assertGreater, u'ant', 'bug')
self.assertRaises(self.failureException, self.assertGreater, 'ant', u'ant')
self.assertRaises(self.failureException, self.assertGreater, u'ant', 'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, 'ant',
u'bug')
self.assertRaises(self.failureException, self.assertGreaterEqual, u'ant',
'bug')
self.assertRaises(self.failureException, self.assertLess, 'bug', u'ant')
self.assertRaises(self.failureException, self.assertLess, u'bug', 'ant')
self.assertRaises(self.failureException, self.assertLess, 'ant', u'ant')
self.assertRaises(self.failureException, self.assertLess, u'ant', 'ant')
self.assertRaises(self.failureException, self.assertLessEqual, 'bug', u'ant')
self.assertRaises(self.failureException, self.assertLessEqual, u'bug', 'ant')
def testAssertMultiLineEqual(self):
sample_text = b"""\
http://www.python.org/doc/2.3/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...]
"""
revised_sample_text = b"""\
http://www.python.org/doc/2.4.1/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...] You may provide your
own implementation that does not subclass from TestCase, of course.
"""
sample_text_error = b"""\
- http://www.python.org/doc/2.3/lib/module-unittest.html
? ^
+ http://www.python.org/doc/2.4.1/lib/module-unittest.html
? ^^^
test case
- A test case is the smallest unit of testing. [...]
+ A test case is the smallest unit of testing. [...] You may provide your
? +++++++++++++++++++++
+ own implementation that does not subclass from TestCase, of course.
"""
self.maxDiff = None
for type_changer in (lambda x: x, lambda x: x.decode('utf8')):
try:
self.assertMultiLineEqual(type_changer(sample_text),
type_changer(revised_sample_text))
except self.failureException as e:
# need to remove the first line of the error message
error = str(e).encode('utf8').split('\n', 1)[1]
# assertMultiLineEqual is hooked up as the default for
# unicode strings - so we can't use it for this check
self.assertTrue(sample_text_error == error)
def testAssertEqualSingleLine(self):
sample_text = u"laden swallows fly slowly"
revised_sample_text = u"unladen swallows fly quickly"
sample_text_error = """\
- laden swallows fly slowly
? ^^^^
+ unladen swallows fly quickly
? ++ ^^^^^
"""
try:
self.assertEqual(sample_text, revised_sample_text)
except self.failureException as e:
error = str(e).split('\n', 1)[1]
self.assertTrue(sample_text_error == error)
def testAssertIsNone(self):
self.assertIsNone(None)
self.assertRaises(self.failureException, self.assertIsNone, False)
self.assertIsNotNone('DjZoPloGears on Rails')
self.assertRaises(self.failureException, self.assertIsNotNone, None)
def testAssertRegexpMatches(self):
self.assertRegexpMatches('asdfabasdf', r'ab+')
self.assertRaises(self.failureException, self.assertRegexpMatches,
'saaas', r'aaaa')
def testAssertRaisesRegexp(self):
class ExceptionMock(Exception):
pass
def Stub():
raise ExceptionMock('We expect')
self.assertRaisesRegexp(ExceptionMock, re.compile('expect$'), Stub)
self.assertRaisesRegexp(ExceptionMock, 'expect$', Stub)
self.assertRaisesRegexp(ExceptionMock, u'expect$', Stub)
def testAssertNotRaisesRegexp(self):
self.assertRaisesRegexp(
self.failureException, '^Exception not raised$',
self.assertRaisesRegexp, Exception, re.compile('x'),
lambda: None)
self.assertRaisesRegexp(
self.failureException, '^Exception not raised$',
self.assertRaisesRegexp, Exception, 'x',
lambda: None)
self.assertRaisesRegexp(
self.failureException, '^Exception not raised$',
self.assertRaisesRegexp, Exception, u'x',
lambda: None)
def testAssertRaisesRegexpMismatch(self):
def Stub():
raise Exception('Unexpected')
self.assertRaisesRegexp(
self.failureException,
r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesRegexp, Exception, '^Expected$',
Stub)
self.assertRaisesRegexp(
self.failureException,
r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesRegexp, Exception, u'^Expected$',
Stub)
self.assertRaisesRegexp(
self.failureException,
r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesRegexp, Exception,
re.compile('^Expected$'), Stub)
def testAssertRaisesExcValue(self):
class ExceptionMock(Exception):
pass
def Stub(foo):
raise ExceptionMock(foo)
v = "particular value"
ctx = self.assertRaises(ExceptionMock)
with ctx:
Stub(v)
e = ctx.exception
self.assertIsInstance(e, ExceptionMock)
self.assertEqual(e.args[0], v)
def testSynonymAssertMethodNames(self):
"""Test undocumented method name synonyms.
Please do not use these method names in your own code.
This test confirms their continued existence and functionality
in order to avoid breaking existing code.
"""
self.assertNotEquals(3, 5)
self.assertEquals(3, 3)
self.assertAlmostEquals(2.0, 2.0)
self.assertNotAlmostEquals(3.0, 5.0)
self.assert_(True)
def testPendingDeprecationMethodNames(self):
"""Test fail* methods pending deprecation, they will warn in 3.2.
Do not use these methods. They will go away in 3.3.
"""
with test_support.check_warnings():
self.failIfEqual(3, 5)
self.failUnlessEqual(3, 3)
self.failUnlessAlmostEqual(2.0, 2.0)
self.failIfAlmostEqual(3.0, 5.0)
self.failUnless(True)
self.failUnlessRaises(TypeError, lambda _: 3.14 + u'spam')
self.failIf(False)
def testDeepcopy(self):
# Issue: 5660
class TestableTest(unittest.TestCase):
def testNothing(self):
pass
test = TestableTest('testNothing')
# This shouldn't blow up
deepcopy(test)
def testKeyboardInterrupt(self):
def _raise(self=None):
raise KeyboardInterrupt
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _raise
class Test2(unittest.TestCase):
setUp = _raise
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _raise
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_raise)
for klass in (Test1, Test2, Test3, Test4):
with self.assertRaises(KeyboardInterrupt):
klass('test_something').run()
def testSystemExit(self):
def _raise(self=None):
raise SystemExit
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _raise
class Test2(unittest.TestCase):
setUp = _raise
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _raise
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_raise)
for klass in (Test1, Test2, Test3, Test4):
result = unittest.TestResult()
klass('test_something').run(result)
self.assertEqual(len(result.errors), 1)
self.assertEqual(result.testsRun, 1)
def testPickle(self):
# Issue 10326
# Can't use TestCase classes defined in Test class as
# pickle does not work with inner classes
test = unittest.TestCase('run')
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
# blew up prior to fix
pickled_test = pickle.dumps(test, protocol=protocol)
unpickled_test = pickle.loads(pickled_test)
self.assertEqual(test, unpickled_test)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
flavour/eden | modules/plugins/__init__.py | 5 | 8807 | # -*- coding: utf-8 -*-
import os
import sys
from gluon import current
from gluon.storage import Storage
from s3compat import reload
__all__ = ("PluginLoader",
)
# Name of the plugin directory in modules
PLUGINS = "plugins"
# Module names to ignore when scanning for plugins
IGNORE = ("skeleton", "__init__")
# Name of the setup function in plugins
SETUP = "setup"
# Name of the variable that contains the version info in plugins
VERSION = "__version__"
# =============================================================================
class PluginLoader(object):
"""
Simple plugin loader (experimental)
Plugins are python modules or packages in the modules/plugins
directory.
Each plugin defines a setup() function which is called during
the request cycle immediately before entering the controller.
Plugins can be added by simply placing them in the plugins
directory, without any code change required.
The plugin directory will be scanned for new or updated plugins
whenever a new session starts, or by calling explicitly:
PluginLoader.detect(reset_all=True)
NB: the reloading of plugins can only be enforced in the
current interpreter thread, while other threads may still
run the old version. Therefore, it is recommended to restart
all threads (i.e. to reload the server) after installing or
updating a plugin.
NB: failing setup() methods will not be tried again until the
next reload (new session, restart, or explicit call).
session.s3.plugins contains a dict of all current plugins, like:
{name: (version, status)}
where:
- name is the python module name of the plugin
- version is the version string provided by the plugin (or
"unknown" if not present)
- status is:
None = newly detected plugin, not set up yet
True = plugin has been set up successfully
False = plugin setup failed in the last attempt, deactivated
"""
# -------------------------------------------------------------------------
@classmethod
def setup_all(cls, reload_all=False):
"""
Setup all plugins
@param reload_all: reload all plugins and reset the registry
"""
if reload_all:
cls.detect(reset_all=True)
for name in list(cls._registry().keys()):
cls.load(name)
# -------------------------------------------------------------------------
@classmethod
def detect(cls, reset_all=False):
"""
Detect new plugins and update the registry
@param reset_all: reset all entries in the registry
"""
default = (None, None)
if reset_all:
plugin = lambda name: default
else:
registry = cls._registry()
plugin = lambda name: registry.get(name, default)
plugins = dict((name, plugin(name)) for name in cls._scan())
cls._registry(plugins)
# -------------------------------------------------------------------------
@classmethod
def load(cls, name, force=False):
"""
Run the setup method of a particular plugin
@param name: the name of the plugin
@param force: enforce the plugin to be reloaded and its
setup method to be re-run regardless of the
previous status
"""
if name[0] == "_":
return False
log = current.log
registry = cls._registry()
if name not in registry:
cls.detect()
# refresh the local reference - detect() replaces the registry dict
registry = cls._registry()
if name not in registry:
raise NameError("plugin '%s' not found" % name)
# Get version and status info from registry
plugin_info = registry[name]
if force or not isinstance(plugin_info, tuple):
version, status = None, None
else:
version, status = plugin_info
if status is None:
new = True
if not (cls._reload(name)):
version, status = "unknown", False
else:
version, status = None, True
else:
new = False
if status is False:
# Skip plugins which have failed in previous attempts
registry[name] = (version, status)
return False
status = True
setup = None
# Import manifest
package = "%s.%s" % (PLUGINS, name)
try:
setup = getattr(__import__(package, fromlist=[SETUP]), SETUP)
except (ImportError, AttributeError):
# This may not be a plugin at all => remove from registry
if new:
log.debug("Plugin '%s' not found" % name)
registry.pop(name, None)
return False
except SyntaxError:
if new:
log.error("Skipping invalid plugin '%s'" % name)
if current.response.s3.debug:
raise
version, status = "invalid", False
if version is None:
# Update version info if plugin has been reloaded
try:
version = getattr(__import__(package, fromlist=[VERSION]), VERSION)
except (ImportError, AttributeError):
version = "unknown"
if status and not callable(setup):
# Is a module => find setup function
try:
setup = setup.setup
except AttributeError:
# No setup function found => treat as failed
if new:
log.debug("No setup function found for plugin '%s'" % name)
status = False
if status:
# Execute setup method
if new:
log.info("Setting up plugin '%s'" % name)
try:
setup()
except Exception:
log.error("Plugin '%s' setup failed" % name)
if current.response.s3.debug:
raise
status = False
# Update the registry
registry[name] = (version, status)
return status
# -------------------------------------------------------------------------
@classmethod
def _registry(cls, plugins=None):
"""
Get (or replace) the current plugin registry
@param plugins: the new registry
"""
session_s3 = current.session.s3
if plugins:
registry = session_s3.plugins = plugins
else:
registry = session_s3.plugins
if registry is None:
# New session => run detect
# - initialize registry first to prevent infinite recursion
registry = session_s3.plugins = {}
cls.detect()
return registry
# -------------------------------------------------------------------------
@staticmethod
def _scan():
"""
Iterator scanning the plugin directory for available plugins
@return: the names of the plugins
"""
folder = current.request.folder
path = os.path.join(folder, "modules", PLUGINS)
names = os.listdir(path)
for name in names:
name_, extension = os.path.splitext(name)
if name_ in IGNORE:
continue
path_ = os.path.join(path, name)
if os.path.isdir(path_) or extension == ".py":
yield name_
# -------------------------------------------------------------------------
@staticmethod
def _reload(name):
"""
Reload a plugin
@param name: the plugin name
@note: this works only within the current thread, other
threads may still be bound to the old version of
the plugin
"""
if name in IGNORE:
return
success = True
appname = current.request.application
plugin_name = "applications.%s.modules.%s.%s" % (appname, PLUGINS, name)
plugin = sys.modules.get(plugin_name)
if plugin is not None:
try:
reload(plugin)
except ImportError:
current.log.error("Reloading plugin '%s' failed" % name)
success = False
return success
# =============================================================================
# Do a full scan when reloading the module (=when the thread starts)
PluginLoader.detect(reset_all=True)
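# Illustrative sketch (not part of the original module; names are made up):
# a minimal plugin this loader would pick up, placed at
# modules/plugins/hello.py:
#
#     __version__ = "1.0.0"
#
#     def setup():
#         # called during the request cycle, before the controller runs
#         from gluon import current
#         current.log.info("hello plugin active")
#
# Dropping such a file into modules/plugins is enough: the next new session
# (or an explicit PluginLoader.detect(reset_all=True)) registers it, and
# setup() runs on the next request.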
# =============================================================================
| mit |
mitar/django | tests/regressiontests/localflavor/ro/tests.py | 33 | 5449 | # -*- coding: utf-8 -*-
from django.contrib.localflavor.ro.forms import (ROCIFField, ROCNPField,
ROCountyField, ROCountySelect, ROIBANField, ROPhoneNumberField,
ROPostalCodeField)
from django.test import SimpleTestCase
class ROLocalFlavorTests(SimpleTestCase):
def test_ROCountySelect(self):
f = ROCountySelect()
out = u'''<select name="county">
<option value="AB">Alba</option>
<option value="AR">Arad</option>
<option value="AG">Arge\u015f</option>
<option value="BC">Bac\u0103u</option>
<option value="BH">Bihor</option>
<option value="BN">Bistri\u0163a-N\u0103s\u0103ud</option>
<option value="BT">Boto\u015fani</option>
<option value="BV">Bra\u015fov</option>
<option value="BR">Br\u0103ila</option>
<option value="B">Bucure\u015fti</option>
<option value="BZ">Buz\u0103u</option>
<option value="CS">Cara\u015f-Severin</option>
<option value="CL">C\u0103l\u0103ra\u015fi</option>
<option value="CJ" selected="selected">Cluj</option>
<option value="CT">Constan\u0163a</option>
<option value="CV">Covasna</option>
<option value="DB">D\xe2mbovi\u0163a</option>
<option value="DJ">Dolj</option>
<option value="GL">Gala\u0163i</option>
<option value="GR">Giurgiu</option>
<option value="GJ">Gorj</option>
<option value="HR">Harghita</option>
<option value="HD">Hunedoara</option>
<option value="IL">Ialomi\u0163a</option>
<option value="IS">Ia\u015fi</option>
<option value="IF">Ilfov</option>
<option value="MM">Maramure\u015f</option>
<option value="MH">Mehedin\u0163i</option>
<option value="MS">Mure\u015f</option>
<option value="NT">Neam\u0163</option>
<option value="OT">Olt</option>
<option value="PH">Prahova</option>
<option value="SM">Satu Mare</option>
<option value="SJ">S\u0103laj</option>
<option value="SB">Sibiu</option>
<option value="SV">Suceava</option>
<option value="TR">Teleorman</option>
<option value="TM">Timi\u015f</option>
<option value="TL">Tulcea</option>
<option value="VS">Vaslui</option>
<option value="VL">V\xe2lcea</option>
<option value="VN">Vrancea</option>
</select>'''
self.assertHTMLEqual(f.render('county', 'CJ'), out)
def test_ROCIFField(self):
error_invalid = [u'Enter a valid CIF.']
error_atmost = [u'Ensure this value has at most 10 characters (it has 11).']
error_atleast = [u'Ensure this value has at least 2 characters (it has 1).']
valid = {
'21694681': u'21694681',
'RO21694681': u'21694681',
}
invalid = {
'21694680': error_invalid,
'21694680000': error_atmost,
'0': error_atleast + error_invalid,
}
self.assertFieldOutput(ROCIFField, valid, invalid)
def test_ROCNPField(self):
error_invalid = [u'Enter a valid CNP.']
error_atleast = [u'Ensure this value has at least 13 characters (it has 10).']
error_atmost = [u'Ensure this value has at most 13 characters (it has 14).']
valid = {
'1981211204489': '1981211204489',
}
invalid = {
'1981211204487': error_invalid,
'1981232204489': error_invalid,
'9981211204489': error_invalid,
'9981211209': error_atleast + error_invalid,
'19812112044891': error_atmost,
}
self.assertFieldOutput(ROCNPField, valid, invalid)
def test_ROCountyField(self):
error_format = [u'Enter a Romanian county code or name.']
valid = {
'CJ': 'CJ',
'cj': 'CJ',
u'Argeş': 'AG',
u'argeş': 'AG',
}
invalid = {
'Arges': error_format,
}
self.assertFieldOutput(ROCountyField, valid, invalid)
def test_ROIBANField(self):
error_invalid = [u'Enter a valid IBAN in ROXX-XXXX-XXXX-XXXX-XXXX-XXXX format']
error_atleast = [u'Ensure this value has at least 24 characters (it has 23).']
valid = {
'RO56RZBR0000060003291177': 'RO56RZBR0000060003291177',
'RO56-RZBR-0000-0600-0329-1177': 'RO56RZBR0000060003291177',
}
invalid = {
'RO56RZBR0000060003291176': error_invalid,
'AT61 1904 3002 3457 3201': error_invalid,
'RO56RZBR000006000329117': error_atleast + error_invalid,
}
self.assertFieldOutput(ROIBANField, valid, invalid)
def test_ROPhoneNumberField(self):
error_format = [u'Phone numbers must be in XXXX-XXXXXX format.']
error_atleast = [u'Ensure this value has at least 10 characters (it has 9).']
error_invalid = [u'Phone numbers must be in XXXX-XXXXXX format.']
valid = {
'0264485936': '0264485936',
'(0264)-485936': '0264485936',
}
invalid = {
'02644859368': error_format,
'026448593': error_atleast + error_invalid,
}
self.assertFieldOutput(ROPhoneNumberField, valid, invalid)
def test_ROPostalCodeField(self):
error_atleast = [u'Ensure this value has at least 6 characters (it has 5).']
error_atmost = [u'Ensure this value has at most 6 characters (it has 7).']
error_invalid = [u'Enter a valid postal code in the format XXXXXX']
valid = {
'400473': '400473',
}
invalid = {
'40047': error_atleast + error_invalid,
'4004731': error_atmost + error_invalid,
}
self.assertFieldOutput(ROPostalCodeField, valid, invalid)
| bsd-3-clause |
pilou-/ansible | lib/ansible/module_utils/oneview.py | 47 | 18924 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import (absolute_import, division, print_function)
import abc
import collections
import json
import os
import traceback
HPE_ONEVIEW_IMP_ERR = None
try:
from hpOneView.oneview_client import OneViewClient
HAS_HPE_ONEVIEW = True
except ImportError:
HPE_ONEVIEW_IMP_ERR = traceback.format_exc()
HAS_HPE_ONEVIEW = False
from ansible.module_utils import six
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
from ansible.module_utils.common._collections_compat import Mapping
def transform_list_to_dict(list_):
"""
Transforms a list into a dictionary: plain values become keys (mapped to True), while mappings in the list are merged in.
:arg list list_: List of values
:return: dict: dictionary built
"""
ret = {}
if not list_:
return ret
for value in list_:
if isinstance(value, Mapping):
ret.update(value)
else:
ret[to_native(value, errors='surrogate_or_strict')] = True
return ret
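# Illustrative example (not from the original source): plain values become
# keys mapped to True, while mappings are merged in as-is.
#
#     transform_list_to_dict(['name', {'uri': '/rest/foo'}])
#     # -> {'name': True, 'uri': '/rest/foo'}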
def merge_list_by_key(original_list, updated_list, key, ignore_when_null=None):
"""
Merge two lists by the key. It basically:
1. Adds the items that are present on updated_list and are absent on original_list.
2. Removes items that are absent on updated_list and are present on original_list.
3. For all items that are in both lists, overwrites the values from the original item by the updated item.
:arg list original_list: original list.
:arg list updated_list: list with changes.
:arg str key: unique identifier.
:arg list ignore_when_null: list with the keys from the updated items that should be ignored in the merge,
if their values are null.
:return: list: Lists merged.
"""
ignore_when_null = [] if ignore_when_null is None else ignore_when_null
if not original_list:
return updated_list
items_map = collections.OrderedDict([(i[key], i.copy()) for i in original_list])
merged_items = collections.OrderedDict()
for item in updated_list:
item_key = item[key]
if item_key in items_map:
for ignored_key in ignore_when_null:
if ignored_key in item and item[ignored_key] is None:
item.pop(ignored_key)
merged_items[item_key] = items_map[item_key]
merged_items[item_key].update(item)
else:
merged_items[item_key] = item
return list(merged_items.values())
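# Illustrative example (not from the original source), merging on a
# hypothetical key 'id':
#
#     original = [{'id': 1, 'a': 1}, {'id': 2, 'b': 2}]
#     updated = [{'id': 2, 'b': 3}, {'id': 3, 'c': 4}]
#     merge_list_by_key(original, updated, 'id')
#     # -> [{'id': 2, 'b': 3}, {'id': 3, 'c': 4}]
#     # item 1 is removed, item 2 is overwritten, item 3 is added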
def _str_sorted(obj):
if isinstance(obj, Mapping):
return json.dumps(obj, sort_keys=True)
else:
return str(obj)
def _standardize_value(value):
"""
Convert a value to a string to make comparisons insensitive to type.
:arg value: Any object type.
:return: str: Converted value.
"""
if isinstance(value, float) and value.is_integer():
# Workaround to avoid erroneous comparison between int and float
# Removes zero from integer floats
value = int(value)
return str(value)
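# Illustrative examples (not from the original source):
#     _standardize_value(2.0)  # -> '2' (integer float loses the zero)
#     _standardize_value(2.5)  # -> '2.5'
#     _standardize_value('2')  # -> '2', so 2, 2.0 and '2' compare as equal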
class OneViewModuleException(Exception):
"""
OneView base Exception.
Attributes:
msg (str): Exception message.
oneview_response (dict): OneView rest response.
"""
def __init__(self, data):
self.msg = None
self.oneview_response = None
if isinstance(data, six.string_types):
self.msg = data
else:
self.oneview_response = data
if data and isinstance(data, dict):
self.msg = data.get('message')
if self.oneview_response:
Exception.__init__(self, self.msg, self.oneview_response)
else:
Exception.__init__(self, self.msg)
class OneViewModuleTaskError(OneViewModuleException):
"""
OneView Task Error Exception.
Attributes:
msg (str): Exception message.
error_code (str): A code which uniquely identifies the specific error.
"""
def __init__(self, msg, error_code=None):
super(OneViewModuleTaskError, self).__init__(msg)
self.error_code = error_code
class OneViewModuleValueError(OneViewModuleException):
"""
OneView Value Error.
The exception is raised when the data contains an inappropriate value.
Attributes:
msg (str): Exception message.
"""
pass
class OneViewModuleResourceNotFound(OneViewModuleException):
"""
OneView Resource Not Found Exception.
The exception is raised when an associated resource was not found.
Attributes:
msg (str): Exception message.
"""
pass
@six.add_metaclass(abc.ABCMeta)
class OneViewModuleBase(object):
MSG_CREATED = 'Resource created successfully.'
MSG_UPDATED = 'Resource updated successfully.'
MSG_DELETED = 'Resource deleted successfully.'
MSG_ALREADY_PRESENT = 'Resource is already present.'
MSG_ALREADY_ABSENT = 'Resource is already absent.'
MSG_DIFF_AT_KEY = 'Difference found at key \'{0}\'. '
ONEVIEW_COMMON_ARGS = dict(
config=dict(type='path'),
hostname=dict(type='str'),
username=dict(type='str'),
password=dict(type='str', no_log=True),
api_version=dict(type='int'),
image_streamer_hostname=dict(type='str')
)
ONEVIEW_VALIDATE_ETAG_ARGS = dict(validate_etag=dict(type='bool', default=True))
resource_client = None
def __init__(self, additional_arg_spec=None, validate_etag_support=False):
"""
OneViewModuleBase constructor.
:arg dict additional_arg_spec: Additional argument spec definition.
:arg bool validate_etag_support: Enables support to eTag validation.
"""
argument_spec = self._build_argument_spec(additional_arg_spec, validate_etag_support)
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
self._check_hpe_oneview_sdk()
self._create_oneview_client()
self.state = self.module.params.get('state')
self.data = self.module.params.get('data')
# Preload params for get_all - used by facts
self.facts_params = self.module.params.get('params') or {}
# Preload options as dict - used by facts
self.options = transform_list_to_dict(self.module.params.get('options'))
self.validate_etag_support = validate_etag_support
def _build_argument_spec(self, additional_arg_spec, validate_etag_support):
merged_arg_spec = dict()
merged_arg_spec.update(self.ONEVIEW_COMMON_ARGS)
if validate_etag_support:
merged_arg_spec.update(self.ONEVIEW_VALIDATE_ETAG_ARGS)
if additional_arg_spec:
merged_arg_spec.update(additional_arg_spec)
return merged_arg_spec
def _check_hpe_oneview_sdk(self):
if not HAS_HPE_ONEVIEW:
self.module.fail_json(msg=missing_required_lib('hpOneView'), exception=HPE_ONEVIEW_IMP_ERR)
def _create_oneview_client(self):
if self.module.params.get('hostname'):
config = dict(ip=self.module.params['hostname'],
credentials=dict(userName=self.module.params['username'], password=self.module.params['password']),
api_version=self.module.params['api_version'],
image_streamer_ip=self.module.params['image_streamer_hostname'])
self.oneview_client = OneViewClient(config)
elif not self.module.params['config']:
self.oneview_client = OneViewClient.from_environment_variables()
else:
self.oneview_client = OneViewClient.from_json_file(self.module.params['config'])
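# Illustrative JSON config file for the from_json_file() path above
# (hypothetical values, assuming the same layout as the dict built here):
#
#     {
#         "ip": "oneview.example.com",
#         "credentials": {"userName": "admin", "password": "secret"},
#         "api_version": 300
#     }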
@abc.abstractmethod
def execute_module(self):
"""
Abstract method, must be implemented by the inheritor.
This method is called from the run method. It should contain the module logic.
:return: dict: It must return a dictionary with the attributes for the module result,
such as ansible_facts, msg and changed.
"""
pass
def run(self):
"""
Common implementation of the OneView run modules.
It calls the inheritor 'execute_module' function and sends the return value to Ansible.
It handles any OneViewModuleException in order to signal a failure to Ansible, with a descriptive error message.
"""
try:
if self.validate_etag_support:
if not self.module.params.get('validate_etag'):
self.oneview_client.connection.disable_etag_validation()
result = self.execute_module()
if "changed" not in result:
result['changed'] = False
self.module.exit_json(**result)
except OneViewModuleException as exception:
error_msg = '; '.join(to_native(e) for e in exception.args)
self.module.fail_json(msg=error_msg, exception=traceback.format_exc())
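# Illustrative subclass sketch (not part of this file; module and resource
# names are hypothetical) showing how run() and execute_module() interact:
#
#     class FcNetworkModule(OneViewModuleBase):
#         def __init__(self):
#             super(FcNetworkModule, self).__init__(
#                 additional_arg_spec=dict(data=dict(type='dict')))
#             self.resource_client = self.oneview_client.fc_networks
#
#         def execute_module(self):
#             resource = self.get_by_name(self.data['name'])
#             return self.resource_present(resource, 'fc_network')
#
#     FcNetworkModule().run()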
def resource_absent(self, resource, method='delete'):
"""
Generic implementation of the absent state for the OneView resources.
It checks if the resource needs to be removed.
:arg dict resource: Resource to delete.
:arg str method: Function of the OneView client that will be called for resource deletion.
Usually delete or remove.
:return: A dictionary with the expected arguments for the AnsibleModule.exit_json
"""
if resource:
getattr(self.resource_client, method)(resource)
return {"changed": True, "msg": self.MSG_DELETED}
else:
return {"changed": False, "msg": self.MSG_ALREADY_ABSENT}
def get_by_name(self, name):
"""
Generic get by name implementation.
:arg str name: Resource name to search for.
:return: The resource found or None.
"""
result = self.resource_client.get_by('name', name)
return result[0] if result else None
def resource_present(self, resource, fact_name, create_method='create'):
"""
Generic implementation of the present state for the OneView resources.
It checks if the resource needs to be created or updated.
:arg dict resource: Resource to create or update.
:arg str fact_name: Name of the fact returned to the Ansible.
:arg str create_method: Function of the OneView client that will be called for resource creation.
Usually create or add.
:return: A dictionary with the expected arguments for the AnsibleModule.exit_json
"""
changed = False
if "newName" in self.data:
self.data["name"] = self.data.pop("newName")
if not resource:
resource = getattr(self.resource_client, create_method)(self.data)
msg = self.MSG_CREATED
changed = True
else:
merged_data = resource.copy()
merged_data.update(self.data)
if self.compare(resource, merged_data):
msg = self.MSG_ALREADY_PRESENT
else:
resource = self.resource_client.update(merged_data)
changed = True
msg = self.MSG_UPDATED
return dict(
msg=msg,
changed=changed,
ansible_facts={fact_name: resource}
)
def resource_scopes_set(self, state, fact_name, scope_uris):
"""
Generic implementation of the scopes update PATCH for the OneView resources.
It checks if the resource needs to be updated with the current scopes.
This method is meant to be run after ensuring the present state.
:arg dict state: Dict containing the data from the last state results in the resource.
It needs to have the 'msg', 'changed', and 'ansible_facts' entries.
:arg str fact_name: Name of the fact returned to the Ansible.
:arg list scope_uris: List with all the scope URIs to be added to the resource.
:return: A dictionary with the expected arguments for the AnsibleModule.exit_json
"""
if scope_uris is None:
scope_uris = []
resource = state['ansible_facts'][fact_name]
operation_data = dict(operation='replace', path='/scopeUris', value=scope_uris)
if resource['scopeUris'] is None or set(resource['scopeUris']) != set(scope_uris):
state['ansible_facts'][fact_name] = self.resource_client.patch(resource['uri'], **operation_data)
state['changed'] = True
state['msg'] = self.MSG_UPDATED
return state
def compare(self, first_resource, second_resource):
"""
Recursively compares dictionary contents for equivalence, ignoring types and element order.
Particularities of the comparison:
- A nonexistent key is treated as equal to a key with value None
- These values are considered equal: None, empty, False
- Lists are compared value by value after a sort, if they have the same size.
- Each element is converted to str before the comparison.
:arg dict first_resource: first dictionary
:arg dict second_resource: second dictionary
:return: bool: True when equal, False when different.
"""
resource1 = first_resource
resource2 = second_resource
debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2)
# The first resource is True / Not Null and the second resource is False / Null
if resource1 and not resource2:
self.module.log("resource1 and not resource2. " + debug_resources)
return False
# Checks all keys in first dict against the second dict
for key in resource1:
if key not in resource2:
if resource1[key] is not None:
# A nonexistent key is equivalent to a key with value None
self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
return False
# If both values are null, empty or False it will be considered equal.
elif not resource1[key] and not resource2[key]:
continue
elif isinstance(resource1[key], Mapping):
# recursive call
if not self.compare(resource1[key], resource2[key]):
self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
return False
elif isinstance(resource1[key], list):
# change comparison function to compare_list
if not self.compare_list(resource1[key], resource2[key]):
self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
return False
elif _standardize_value(resource1[key]) != _standardize_value(resource2[key]):
self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
return False
# Checks all keys in the second dict, looking for missing elements
for key in resource2.keys():
if key not in resource1:
if resource2[key] is not None:
# A nonexistent key is equivalent to a key with value None
self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
return False
return True
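# Illustrative results (not from the original source), following the rules
# in the docstring above:
#     self.compare({'a': 1, 'b': None}, {'a': '1'})  # -> True: the missing
#         # key equals None, and 1 vs '1' are equal after str conversion
#     self.compare({'a': 1}, {'a': 2})  # -> False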
def compare_list(self, first_resource, second_resource):
"""
Recursively compares list contents for equivalence, ignoring types and element order.
Lists of the same size are compared value by value after a sort;
each element is converted to str before the comparison.
:arg list first_resource: first list
:arg list second_resource: second list
:return: True when equal; False when different.
"""
resource1 = first_resource
resource2 = second_resource
debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2)
# The second list is null / empty / False
if not resource2:
self.module.log("resource 2 is null. " + debug_resources)
return False
if len(resource1) != len(resource2):
self.module.log("resources have different length. " + debug_resources)
return False
resource1 = sorted(resource1, key=_str_sorted)
resource2 = sorted(resource2, key=_str_sorted)
for i, val in enumerate(resource1):
if isinstance(val, Mapping):
# change comparison function to compare dictionaries
if not self.compare(val, resource2[i]):
self.module.log("resources are different. " + debug_resources)
return False
elif isinstance(val, list):
# recursive call
if not self.compare_list(val, resource2[i]):
self.module.log("lists are different. " + debug_resources)
return False
elif _standardize_value(val) != _standardize_value(resource2[i]):
self.module.log("values are different. " + debug_resources)
return False
# no differences found
return True
| gpl-3.0 |
caiocsalvador/whats_the_craic | lib/python3.4/site-packages/django/utils/deconstruct.py | 502 | 2047 | from importlib import import_module
from django.utils.version import get_docs_version
def deconstructible(*args, **kwargs):
"""
Class decorator that allow the decorated class to be serialized
by the migrations subsystem.
Accepts an optional kwarg `path` to specify the import path.
"""
path = kwargs.pop('path', None)
def decorator(klass):
def __new__(cls, *args, **kwargs):
# We capture the arguments to make returning them trivial
obj = super(klass, cls).__new__(cls)
obj._constructor_args = (args, kwargs)
return obj
def deconstruct(obj):
"""
Returns a 3-tuple of class import path, positional arguments,
and keyword arguments.
"""
# Python 2/fallback version
if path:
module_name, _, name = path.rpartition('.')
else:
module_name = obj.__module__
name = obj.__class__.__name__
# Make sure it's actually there and not an inner class
module = import_module(module_name)
if not hasattr(module, name):
raise ValueError(
"Could not find object %s in %s.\n"
"Please note that you cannot serialize things like inner "
"classes. Please move the object into the main module "
"body to use migrations.\n"
"For more information, see "
"https://docs.djangoproject.com/en/%s/topics/migrations/#serializing-values"
% (name, module_name, get_docs_version()))
return (
path or '%s.%s' % (obj.__class__.__module__, name),
obj._constructor_args[0],
obj._constructor_args[1],
)
klass.__new__ = staticmethod(__new__)
klass.deconstruct = deconstruct
return klass
if not args:
return decorator
return decorator(*args, **kwargs)
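# Illustrative usage sketch (class, module path and arguments are
# hypothetical):
#
#     @deconstructible(path='myapp.validators.RangeValidator')
#     class RangeValidator(object):
#         def __init__(self, low, high):
#             self.low, self.high = low, high
#
#     RangeValidator(1, 10).deconstruct()
#     # -> ('myapp.validators.RangeValidator', (1, 10), {})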
| mit |
amluto/libseccomp | tests/21-live-basic_allow.py | 1 | 1695 | #!/usr/bin/env python
#
# Seccomp Library test program
#
# Copyright (c) 2013 Red Hat <pmoore@redhat.com>
# Author: Paul Moore <pmoore@redhat.com>
#
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of version 2.1 of the GNU Lesser General Public License as
# published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, see <http://www.gnu.org/licenses>.
#
import argparse
import sys
import util
from seccomp import *
def test():
action = util.parse_action(sys.argv[1])
if action != ALLOW:
quit(1)
util.install_trap()
f = SyscallFilter(TRAP)
# NOTE: additional syscalls required for python
f.add_rule_exactly(ALLOW, "stat")
f.add_rule_exactly(ALLOW, "fstat")
f.add_rule_exactly(ALLOW, "open")
f.add_rule_exactly(ALLOW, "mmap")
f.add_rule_exactly(ALLOW, "munmap")
f.add_rule_exactly(ALLOW, "read")
f.add_rule_exactly(ALLOW, "write")
f.add_rule_exactly(ALLOW, "close")
f.add_rule_exactly(ALLOW, "rt_sigaction")
f.add_rule_exactly(ALLOW, "rt_sigreturn")
f.add_rule_exactly(ALLOW, "exit_group")
f.load()
try:
util.write_file("/dev/null")
except OSError as ex:
quit(ex.errno)
quit(160)
test()
# kate: syntax python;
# kate: indent-mode python; space-indent on; indent-width 4; mixedindent off;
| lgpl-2.1 |
krismcfarlin/gae_endpoints_aloha | bp_includes/external/requests/packages/chardet/big5freq.py | 3133 | 82594 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Big5 frequency table
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
#
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851/(1-0.74851) = 2.98
# Random Distribution Ratio = 512/(5401-512) = 0.105
#
# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR
BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75
#Char to FreqOrder table
BIG5_TABLE_SIZE = 5376
Big5CharToFreqOrder = (
1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16
3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32
1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48
63,5010,5011, 317,1614, 75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, # 64
3682, 3, 10,3973,1471, 29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, # 80
4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947, 34,3556,3204, 64, 604, # 96
5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337, 72, 406,5017, 80, # 112
630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449, 69,2987, 591, # 128
179,2096, 471, 115,2035,1844, 60, 50,2988, 134, 806,1869, 734,2036,3454, 180, # 144
995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, # 160
2502, 90,2716,1338, 663, 11, 906,1099,2553, 20,2441, 182, 532,1716,5019, 732, # 176
1376,4204,1311,1420,3206, 25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, # 192
3276, 475,1447,3683,5020, 117, 21, 656, 810,1297,2300,2334,3557,5021, 126,4205, # 208
706, 456, 150, 613,4513, 71,1118,2037,4206, 145,3092, 85, 835, 486,2115,1246, # 224
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, # 240
3558,3135,5023,1956,1153,4207, 83, 296,1199,3093, 192, 624, 93,5024, 822,1898, # 256
2823,3136, 795,2065, 991,1554,1542,1592, 27, 43,2867, 859, 139,1456, 860,4514, # 272
437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, # 288
3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, # 304
1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, # 320
5026,5027,2176,3207,3685,2682, 593, 845,1062,3277, 88,1723,2038,3978,1951, 212, # 336
266, 152, 149, 468,1899,4208,4516, 77, 187,5028,3038, 37, 5,2990,5029,3979, # 352
5030,5031, 39,2524,4517,2908,3208,2079, 55, 148, 74,4518, 545, 483,1474,1029, # 368
1665, 217,1870,1531,3138,1104,2655,4209, 24, 172,3562, 900,3980,3563,3564,4519, # 384
32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683, 4,3039,3351,1427,1789, # 400
188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, # 416
3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439, 38,5037,1063,5038, 794, # 432
3982,1435,2301, 46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804, 35, 707, # 448
324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, # 464
2129,1363,3689,1423, 697, 100,3094, 48, 70,1231, 495,3139,2196,5043,1294,5044, # 480
2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, # 496
314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, # 512
287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, # 528
3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, # 544
1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, # 560
1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, # 576
1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381, 7, # 592
2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, # 608
265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, # 624
4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, # 640
1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, # 656
5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, # 672
2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, # 688
383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, # 704
98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, # 720
523,2789,2790,2658,5061, 141,2235,1333, 68, 176, 441, 876, 907,4220, 603,2602, # 736
710, 171,3464, 404, 549, 18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, # 752
5063,2991, 368,5064, 146, 366, 99, 871,3693,1543, 748, 807,1586,1185, 22,2263, # 768
379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, # 784
1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068, 59,5069, # 800
585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, # 816
690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, # 832
5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, # 848
1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, # 864
544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, # 880
3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, # 896
4224, 57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, # 912
3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, # 928
279,3145, 51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, # 944
610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, # 960
1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, # 976
4227,2475,1436, 953,4228,2055,4545, 671,2400, 79,4229,2446,3285, 608, 567,2689, # 992
3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008
3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024
2402,5097,5098,5099,4232,3045, 0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040
5101, 233,4233,3697,1819,4550,4551,5102, 96,1777,1315,2083,5103, 257,5104,1810, # 1056
3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072
5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088
1484,5110,1712, 127, 67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104
2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120
1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136
78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152
1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168
4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184
3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200
534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216
165, 243,4559,3703,2528, 123, 683,4239, 764,4560, 36,3998,1793, 589,2916, 816, # 1232
626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248
2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264
5122, 611,1156, 854,2386,1316,2875, 2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280
1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296
2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312
1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328
1994,5135,4564,5136,5137,2198, 13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344
5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360
5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376
5149, 128,2133, 92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392
3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408
4567,2252, 94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424
4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440
2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456
5163,2337,2068, 23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472
3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488
598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504
5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863, 41, # 1520
5170,5171,4575,5172,1657,2338, 19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536
1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552
2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568
3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584
4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600
5182,2692, 733, 40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616
3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632
4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648
1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664
1871,2762,3004,5187, 435,5188, 343,1108, 596, 17,1751,4579,2239,3477,3709,5189, # 1680
4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696
1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712
240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728
1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744
1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760
3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776
619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792
5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808
2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824
1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840
1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551, 30,2268,4266, # 1856
5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872
829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888
4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904
375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920
2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936
444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952
1041,3005, 293,1168, 87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968
1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984
730,1515, 184,2840, 66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000
4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016
4021,5231,5232,1186, 15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032
1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048
3596,1342,1681,1718, 766,3297, 286, 89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064
5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080
5240,3298, 310, 313,3482,2304, 770,4278, 54,3054, 189,4611,3105,3848,4025,5241, # 2096
1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112
2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128
1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144
3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160
2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176
3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192
2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208
4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224
4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240
3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256
97, 81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272
3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288
424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304
3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320
4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336
3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352
1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368
5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384
199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400
5286, 587, 14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416
1702,1226, 102,1547, 62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432
391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448
4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294, 86,1494,1730, # 2464
4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480
397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496
2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512
2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885, 28,2695, # 2528
3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544
1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560
4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576
2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592
1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608
1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624
2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640
3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656
1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672
5313,3493,5314,5315,5316,3310,2698,1433,3311, 131, 95,1504,4049, 723,4303,3166, # 2688
1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704
4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654, 53,5320,3014,5321, # 2720
1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736
135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752
1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768
4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784
4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800
2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816
1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832
4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848
660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864
5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880
2322,3316,5346,5347,4308,5348,4309, 84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896
3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912
4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928
790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944
5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960
5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976
1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992
4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008
4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024
2699,1516,3614,1121,1082,1329,3317,4073,1449,3873, 65,1128,2848,2927,2769,1590, # 3040
3874,5370,5371, 12,2668, 45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056
3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072
2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088
1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104
4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120
3736,1859, 91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136
3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152
2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168
4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771, 61,4079,3738,1823,4080, # 3184
5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200
3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216
2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232
3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248
1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264
2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280
3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296
4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063, 56,1396,3113, # 3312
2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328
2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344
5418,1076, 49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360
1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376
2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392
1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408
3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424
4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629, 31,2851, # 3440
2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456
3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472
3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488
2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504
4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520
2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536
3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552
4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568
5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584
3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600
194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616
1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412, 42,3119, 464,5455,2642, # 3632
4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648
1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664
4701,5462,3020, 962, 588,3629, 289,3250,2644,1116, 52,5463,3067,1797,5464,5465, # 3680
5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696
510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712
5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728
5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744
2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760
3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776
2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792
2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808
681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824
1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840
4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856
3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872
3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888
838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904
2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920
625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936
2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952
4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968
1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984
4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000
1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016
3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032
574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048
3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064
5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080
5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096
3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112
3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128
1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144
2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160
5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176
1561,2674,1452,4113,1375,5549,5550, 47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192
1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208
3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224
919,2352,2975,2353,1270,4727,4115, 73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240
1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256
4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272
5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288
2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304
3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320
516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336
1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 4352
2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368
2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384
5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400
5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416
5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432
2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448
2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464
1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480
4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496
3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512
3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528
4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544
4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560
2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576
2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592
5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608
4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624
5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640
4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656
502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672
121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688
1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704
3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720
4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736
1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752
5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768
2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784
2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800
3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816
5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832
1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848
3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864
5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880
1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896
5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912
2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928
3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944
2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960
3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976
3932,1988, 618, 427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992
3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008
4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024
803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040
2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056
4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072
3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088
5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104
1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120
5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136
425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152
1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168
479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184
4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200
1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216
4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232
1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248
433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264
3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280
4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296
5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312
938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328
3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344
890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360
2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376 #last 512
# Everything below is of no interest for detection purposes
2522,1613,4812,5799,3345,3945,2523,5800,4162,5801,1637,4163,2471,4813,3946,5802, # 5392
2500,3034,3800,5803,5804,2195,4814,5805,2163,5806,5807,5808,5809,5810,5811,5812, # 5408
5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828, # 5424
5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844, # 5440
5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856,5857,5858,5859,5860, # 5456
5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872,5873,5874,5875,5876, # 5472
5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888,5889,5890,5891,5892, # 5488
5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,5906,5907,5908, # 5504
5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,5921,5922,5923,5924, # 5520
5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936,5937,5938,5939,5940, # 5536
5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952,5953,5954,5955,5956, # 5552
5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968,5969,5970,5971,5972, # 5568
5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984,5985,5986,5987,5988, # 5584
5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004, # 5600
6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020, # 5616
6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032,6033,6034,6035,6036, # 5632
6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052, # 5648
6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068, # 5664
6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084, # 5680
6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100, # 5696
6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116, # 5712
6117,6118,6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,6132, # 5728
6133,6134,6135,6136,6137,6138,6139,6140,6141,6142,6143,6144,6145,6146,6147,6148, # 5744
6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,6164, # 5760
6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,6180, # 5776
6181,6182,6183,6184,6185,6186,6187,6188,6189,6190,6191,6192,6193,6194,6195,6196, # 5792
6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,6211,6212, # 5808
6213,6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,3670,6224,6225,6226,6227, # 5824
6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,6242,6243, # 5840
6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,6254,6255,6256,6257,6258,6259, # 5856
6260,6261,6262,6263,6264,6265,6266,6267,6268,6269,6270,6271,6272,6273,6274,6275, # 5872
6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,4815,6286,6287,6288,6289,6290, # 5888
6291,6292,4816,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,6303,6304,6305, # 5904
6306,6307,6308,6309,6310,6311,4817,4818,6312,6313,6314,6315,6316,6317,6318,4819, # 5920
6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,6334, # 5936
6335,6336,6337,4820,6338,6339,6340,6341,6342,6343,6344,6345,6346,6347,6348,6349, # 5952
6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,6364,6365, # 5968
6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,6380,6381, # 5984
6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,6396,6397, # 6000
6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,3441,6411,6412, # 6016
6413,6414,6415,6416,6417,6418,6419,6420,6421,6422,6423,6424,6425,4440,6426,6427, # 6032
6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,6439,6440,6441,6442,6443, # 6048
6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,4821,6455,6456,6457,6458, # 6064
6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,6473,6474, # 6080
6475,6476,6477,3947,3948,6478,6479,6480,6481,3272,4441,6482,6483,6484,6485,4442, # 6096
6486,6487,6488,6489,6490,6491,6492,6493,6494,6495,6496,4822,6497,6498,6499,6500, # 6112
6501,6502,6503,6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516, # 6128
6517,6518,6519,6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532, # 6144
6533,6534,6535,6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548, # 6160
6549,6550,6551,6552,6553,6554,6555,6556,2784,6557,4823,6558,6559,6560,6561,6562, # 6176
6563,6564,6565,6566,6567,6568,6569,3949,6570,6571,6572,4824,6573,6574,6575,6576, # 6192
6577,6578,6579,6580,6581,6582,6583,4825,6584,6585,6586,3950,2785,6587,6588,6589, # 6208
6590,6591,6592,6593,6594,6595,6596,6597,6598,6599,6600,6601,6602,6603,6604,6605, # 6224
6606,6607,6608,6609,6610,6611,6612,4826,6613,6614,6615,4827,6616,6617,6618,6619, # 6240
6620,6621,6622,6623,6624,6625,4164,6626,6627,6628,6629,6630,6631,6632,6633,6634, # 6256
3547,6635,4828,6636,6637,6638,6639,6640,6641,6642,3951,2984,6643,6644,6645,6646, # 6272
6647,6648,6649,4165,6650,4829,6651,6652,4830,6653,6654,6655,6656,6657,6658,6659, # 6288
6660,6661,6662,4831,6663,6664,6665,6666,6667,6668,6669,6670,6671,4166,6672,4832, # 6304
3952,6673,6674,6675,6676,4833,6677,6678,6679,4167,6680,6681,6682,3198,6683,6684, # 6320
6685,6686,6687,6688,6689,6690,6691,6692,6693,6694,6695,6696,6697,4834,6698,6699, # 6336
6700,6701,6702,6703,6704,6705,6706,6707,6708,6709,6710,6711,6712,6713,6714,6715, # 6352
6716,6717,6718,6719,6720,6721,6722,6723,6724,6725,6726,6727,6728,6729,6730,6731, # 6368
6732,6733,6734,4443,6735,6736,6737,6738,6739,6740,6741,6742,6743,6744,6745,4444, # 6384
6746,6747,6748,6749,6750,6751,6752,6753,6754,6755,6756,6757,6758,6759,6760,6761, # 6400
6762,6763,6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777, # 6416
6778,6779,6780,6781,4168,6782,6783,3442,6784,6785,6786,6787,6788,6789,6790,6791, # 6432
4169,6792,6793,6794,6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806, # 6448
6807,6808,6809,6810,6811,4835,6812,6813,6814,4445,6815,6816,4446,6817,6818,6819, # 6464
6820,6821,6822,6823,6824,6825,6826,6827,6828,6829,6830,6831,6832,6833,6834,6835, # 6480
3548,6836,6837,6838,6839,6840,6841,6842,6843,6844,6845,6846,4836,6847,6848,6849, # 6496
6850,6851,6852,6853,6854,3953,6855,6856,6857,6858,6859,6860,6861,6862,6863,6864, # 6512
6865,6866,6867,6868,6869,6870,6871,6872,6873,6874,6875,6876,6877,3199,6878,6879, # 6528
6880,6881,6882,4447,6883,6884,6885,6886,6887,6888,6889,6890,6891,6892,6893,6894, # 6544
6895,6896,6897,6898,6899,6900,6901,6902,6903,6904,4170,6905,6906,6907,6908,6909, # 6560
6910,6911,6912,6913,6914,6915,6916,6917,6918,6919,6920,6921,6922,6923,6924,6925, # 6576
6926,6927,4837,6928,6929,6930,6931,6932,6933,6934,6935,6936,3346,6937,6938,4838, # 6592
6939,6940,6941,4448,6942,6943,6944,6945,6946,4449,6947,6948,6949,6950,6951,6952, # 6608
6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,6967,6968, # 6624
6969,6970,6971,6972,6973,6974,6975,6976,6977,6978,6979,6980,6981,6982,6983,6984, # 6640
6985,6986,6987,6988,6989,6990,6991,6992,6993,6994,3671,6995,6996,6997,6998,4839, # 6656
6999,7000,7001,7002,3549,7003,7004,7005,7006,7007,7008,7009,7010,7011,7012,7013, # 6672
7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,7028,7029, # 6688
7030,4840,7031,7032,7033,7034,7035,7036,7037,7038,4841,7039,7040,7041,7042,7043, # 6704
7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,7059, # 6720
7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,2985,7071,7072,7073,7074, # 6736
7075,7076,7077,7078,7079,7080,4842,7081,7082,7083,7084,7085,7086,7087,7088,7089, # 6752
7090,7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105, # 6768
7106,7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,4450,7119,7120, # 6784
7121,7122,7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136, # 6800
7137,7138,7139,7140,7141,7142,7143,4843,7144,7145,7146,7147,7148,7149,7150,7151, # 6816
7152,7153,7154,7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167, # 6832
7168,7169,7170,7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183, # 6848
7184,7185,7186,7187,7188,4171,4172,7189,7190,7191,7192,7193,7194,7195,7196,7197, # 6864
7198,7199,7200,7201,7202,7203,7204,7205,7206,7207,7208,7209,7210,7211,7212,7213, # 6880
7214,7215,7216,7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229, # 6896
7230,7231,7232,7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245, # 6912
7246,7247,7248,7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261, # 6928
7262,7263,7264,7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277, # 6944
7278,7279,7280,7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293, # 6960
7294,7295,7296,4844,7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308, # 6976
7309,7310,7311,7312,7313,7314,7315,7316,4451,7317,7318,7319,7320,7321,7322,7323, # 6992
7324,7325,7326,7327,7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339, # 7008
7340,7341,7342,7343,7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,4173,7354, # 7024
7355,4845,7356,7357,7358,7359,7360,7361,7362,7363,7364,7365,7366,7367,7368,7369, # 7040
7370,7371,7372,7373,7374,7375,7376,7377,7378,7379,7380,7381,7382,7383,7384,7385, # 7056
7386,7387,7388,4846,7389,7390,7391,7392,7393,7394,7395,7396,7397,7398,7399,7400, # 7072
7401,7402,7403,7404,7405,3672,7406,7407,7408,7409,7410,7411,7412,7413,7414,7415, # 7088
7416,7417,7418,7419,7420,7421,7422,7423,7424,7425,7426,7427,7428,7429,7430,7431, # 7104
7432,7433,7434,7435,7436,7437,7438,7439,7440,7441,7442,7443,7444,7445,7446,7447, # 7120
7448,7449,7450,7451,7452,7453,4452,7454,3200,7455,7456,7457,7458,7459,7460,7461, # 7136
7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,7472,7473,7474,4847,7475,7476, # 7152
7477,3133,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,7488,7489,7490,7491, # 7168
7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,3347,7503,7504,7505,7506, # 7184
7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,7520,7521,4848, # 7200
7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,7536,7537, # 7216
7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,3801,4849,7550,7551, # 7232
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567, # 7248
7568,7569,3035,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582, # 7264
7583,7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598, # 7280
7599,7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614, # 7296
7615,7616,4850,7617,7618,3802,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628, # 7312
7629,7630,7631,7632,4851,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643, # 7328
7644,7645,7646,7647,7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659, # 7344
7660,7661,7662,7663,7664,7665,7666,7667,7668,7669,7670,4453,7671,7672,7673,7674, # 7360
7675,7676,7677,7678,7679,7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690, # 7376
7691,7692,7693,7694,7695,7696,7697,3443,7698,7699,7700,7701,7702,4454,7703,7704, # 7392
7705,7706,7707,7708,7709,7710,7711,7712,7713,2472,7714,7715,7716,7717,7718,7719, # 7408
7720,7721,7722,7723,7724,7725,7726,7727,7728,7729,7730,7731,3954,7732,7733,7734, # 7424
7735,7736,7737,7738,7739,7740,7741,7742,7743,7744,7745,7746,7747,7748,7749,7750, # 7440
3134,7751,7752,4852,7753,7754,7755,4853,7756,7757,7758,7759,7760,4174,7761,7762, # 7456
7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,7777,7778, # 7472
7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,7792,7793,7794, # 7488
7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,4854,7806,7807,7808,7809, # 7504
7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824,7825, # 7520
4855,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7536
7841,7842,7843,7844,7845,7846,7847,3955,7848,7849,7850,7851,7852,7853,7854,7855, # 7552
7856,7857,7858,7859,7860,3444,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870, # 7568
7871,7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886, # 7584
7887,7888,7889,7890,7891,4175,7892,7893,7894,7895,7896,4856,4857,7897,7898,7899, # 7600
7900,2598,7901,7902,7903,7904,7905,7906,7907,7908,4455,7909,7910,7911,7912,7913, # 7616
7914,3201,7915,7916,7917,7918,7919,7920,7921,4858,7922,7923,7924,7925,7926,7927, # 7632
7928,7929,7930,7931,7932,7933,7934,7935,7936,7937,7938,7939,7940,7941,7942,7943, # 7648
7944,7945,7946,7947,7948,7949,7950,7951,7952,7953,7954,7955,7956,7957,7958,7959, # 7664
7960,7961,7962,7963,7964,7965,7966,7967,7968,7969,7970,7971,7972,7973,7974,7975, # 7680
7976,7977,7978,7979,7980,7981,4859,7982,7983,7984,7985,7986,7987,7988,7989,7990, # 7696
7991,7992,7993,7994,7995,7996,4860,7997,7998,7999,8000,8001,8002,8003,8004,8005, # 7712
8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,8016,4176,8017,8018,8019,8020, # 7728
8021,8022,8023,4861,8024,8025,8026,8027,8028,8029,8030,8031,8032,8033,8034,8035, # 7744
8036,4862,4456,8037,8038,8039,8040,4863,8041,8042,8043,8044,8045,8046,8047,8048, # 7760
8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,8064, # 7776
8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,8080, # 7792
8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,8096, # 7808
8097,8098,8099,4864,4177,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110, # 7824
8111,8112,8113,8114,8115,8116,8117,8118,8119,8120,4178,8121,8122,8123,8124,8125, # 7840
8126,8127,8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141, # 7856
8142,8143,8144,8145,4865,4866,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155, # 7872
8156,8157,8158,8159,8160,8161,8162,8163,8164,8165,4179,8166,8167,8168,8169,8170, # 7888
8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181,4457,8182,8183,8184,8185, # 7904
8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197,8198,8199,8200,8201, # 7920
8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213,8214,8215,8216,8217, # 7936
8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229,8230,8231,8232,8233, # 7952
8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245,8246,8247,8248,8249, # 7968
8250,8251,8252,8253,8254,8255,8256,3445,8257,8258,8259,8260,8261,8262,4458,8263, # 7984
8264,8265,8266,8267,8268,8269,8270,8271,8272,4459,8273,8274,8275,8276,3550,8277, # 8000
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,4460,8290,8291,8292, # 8016
8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,4867, # 8032
8308,8309,8310,8311,8312,3551,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322, # 8048
8323,8324,8325,8326,4868,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337, # 8064
8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353, # 8080
8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,4869,4461,8364,8365,8366,8367, # 8096
8368,8369,8370,4870,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382, # 8112
8383,8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398, # 8128
8399,8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,4871,8411,8412,8413, # 8144
8414,8415,8416,8417,8418,8419,8420,8421,8422,4462,8423,8424,8425,8426,8427,8428, # 8160
8429,8430,8431,8432,8433,2986,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443, # 8176
8444,8445,8446,8447,8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459, # 8192
8460,8461,8462,8463,8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475, # 8208
8476,8477,8478,4180,8479,8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490, # 8224
8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506, # 8240
8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522, # 8256
8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538, # 8272
8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554, # 8288
8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,4872,8565,8566,8567,8568,8569, # 8304
8570,8571,8572,8573,4873,8574,8575,8576,8577,8578,8579,8580,8581,8582,8583,8584, # 8320
8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597,8598,8599,8600, # 8336
8601,8602,8603,8604,8605,3803,8606,8607,8608,8609,8610,8611,8612,8613,4874,3804, # 8352
8614,8615,8616,8617,8618,8619,8620,8621,3956,8622,8623,8624,8625,8626,8627,8628, # 8368
8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,2865,8639,8640,8641,8642,8643, # 8384
8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,4463,8657,8658, # 8400
8659,4875,4876,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672, # 8416
8673,8674,8675,8676,8677,8678,8679,8680,8681,4464,8682,8683,8684,8685,8686,8687, # 8432
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703, # 8448
8704,8705,8706,8707,8708,8709,2261,8710,8711,8712,8713,8714,8715,8716,8717,8718, # 8464
8719,8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,4181, # 8480
8734,8735,8736,8737,8738,8739,8740,8741,8742,8743,8744,8745,8746,8747,8748,8749, # 8496
8750,8751,8752,8753,8754,8755,8756,8757,8758,8759,8760,8761,8762,8763,4877,8764, # 8512
8765,8766,8767,8768,8769,8770,8771,8772,8773,8774,8775,8776,8777,8778,8779,8780, # 8528
8781,8782,8783,8784,8785,8786,8787,8788,4878,8789,4879,8790,8791,8792,4880,8793, # 8544
8794,8795,8796,8797,8798,8799,8800,8801,4881,8802,8803,8804,8805,8806,8807,8808, # 8560
8809,8810,8811,8812,8813,8814,8815,3957,8816,8817,8818,8819,8820,8821,8822,8823, # 8576
8824,8825,8826,8827,8828,8829,8830,8831,8832,8833,8834,8835,8836,8837,8838,8839, # 8592
8840,8841,8842,8843,8844,8845,8846,8847,4882,8848,8849,8850,8851,8852,8853,8854, # 8608
8855,8856,8857,8858,8859,8860,8861,8862,8863,8864,8865,8866,8867,8868,8869,8870, # 8624
8871,8872,8873,8874,8875,8876,8877,8878,8879,8880,8881,8882,8883,8884,3202,8885, # 8640
8886,8887,8888,8889,8890,8891,8892,8893,8894,8895,8896,8897,8898,8899,8900,8901, # 8656
8902,8903,8904,8905,8906,8907,8908,8909,8910,8911,8912,8913,8914,8915,8916,8917, # 8672
8918,8919,8920,8921,8922,8923,8924,4465,8925,8926,8927,8928,8929,8930,8931,8932, # 8688
4883,8933,8934,8935,8936,8937,8938,8939,8940,8941,8942,8943,2214,8944,8945,8946, # 8704
8947,8948,8949,8950,8951,8952,8953,8954,8955,8956,8957,8958,8959,8960,8961,8962, # 8720
8963,8964,8965,4884,8966,8967,8968,8969,8970,8971,8972,8973,8974,8975,8976,8977, # 8736
8978,8979,8980,8981,8982,8983,8984,8985,8986,8987,8988,8989,8990,8991,8992,4885, # 8752
8993,8994,8995,8996,8997,8998,8999,9000,9001,9002,9003,9004,9005,9006,9007,9008, # 8768
9009,9010,9011,9012,9013,9014,9015,9016,9017,9018,9019,9020,9021,4182,9022,9023, # 8784
9024,9025,9026,9027,9028,9029,9030,9031,9032,9033,9034,9035,9036,9037,9038,9039, # 8800
9040,9041,9042,9043,9044,9045,9046,9047,9048,9049,9050,9051,9052,9053,9054,9055, # 8816
9056,9057,9058,9059,9060,9061,9062,9063,4886,9064,9065,9066,9067,9068,9069,4887, # 8832
9070,9071,9072,9073,9074,9075,9076,9077,9078,9079,9080,9081,9082,9083,9084,9085, # 8848
9086,9087,9088,9089,9090,9091,9092,9093,9094,9095,9096,9097,9098,9099,9100,9101, # 8864
9102,9103,9104,9105,9106,9107,9108,9109,9110,9111,9112,9113,9114,9115,9116,9117, # 8880
9118,9119,9120,9121,9122,9123,9124,9125,9126,9127,9128,9129,9130,9131,9132,9133, # 8896
9134,9135,9136,9137,9138,9139,9140,9141,3958,9142,9143,9144,9145,9146,9147,9148, # 8912
9149,9150,9151,4888,9152,9153,9154,9155,9156,9157,9158,9159,9160,9161,9162,9163, # 8928
9164,9165,9166,9167,9168,9169,9170,9171,9172,9173,9174,9175,4889,9176,9177,9178, # 8944
9179,9180,9181,9182,9183,9184,9185,9186,9187,9188,9189,9190,9191,9192,9193,9194, # 8960
9195,9196,9197,9198,9199,9200,9201,9202,9203,4890,9204,9205,9206,9207,9208,9209, # 8976
9210,9211,9212,9213,9214,9215,9216,9217,9218,9219,9220,9221,9222,4466,9223,9224, # 8992
9225,9226,9227,9228,9229,9230,9231,9232,9233,9234,9235,9236,9237,9238,9239,9240, # 9008
9241,9242,9243,9244,9245,4891,9246,9247,9248,9249,9250,9251,9252,9253,9254,9255, # 9024
9256,9257,4892,9258,9259,9260,9261,4893,4894,9262,9263,9264,9265,9266,9267,9268, # 9040
9269,9270,9271,9272,9273,4467,9274,9275,9276,9277,9278,9279,9280,9281,9282,9283, # 9056
9284,9285,3673,9286,9287,9288,9289,9290,9291,9292,9293,9294,9295,9296,9297,9298, # 9072
9299,9300,9301,9302,9303,9304,9305,9306,9307,9308,9309,9310,9311,9312,9313,9314, # 9088
9315,9316,9317,9318,9319,9320,9321,9322,4895,9323,9324,9325,9326,9327,9328,9329, # 9104
9330,9331,9332,9333,9334,9335,9336,9337,9338,9339,9340,9341,9342,9343,9344,9345, # 9120
9346,9347,4468,9348,9349,9350,9351,9352,9353,9354,9355,9356,9357,9358,9359,9360, # 9136
9361,9362,9363,9364,9365,9366,9367,9368,9369,9370,9371,9372,9373,4896,9374,4469, # 9152
9375,9376,9377,9378,9379,4897,9380,9381,9382,9383,9384,9385,9386,9387,9388,9389, # 9168
9390,9391,9392,9393,9394,9395,9396,9397,9398,9399,9400,9401,9402,9403,9404,9405, # 9184
9406,4470,9407,2751,9408,9409,3674,3552,9410,9411,9412,9413,9414,9415,9416,9417, # 9200
9418,9419,9420,9421,4898,9422,9423,9424,9425,9426,9427,9428,9429,3959,9430,9431, # 9216
9432,9433,9434,9435,9436,4471,9437,9438,9439,9440,9441,9442,9443,9444,9445,9446, # 9232
9447,9448,9449,9450,3348,9451,9452,9453,9454,9455,9456,9457,9458,9459,9460,9461, # 9248
9462,9463,9464,9465,9466,9467,9468,9469,9470,9471,9472,4899,9473,9474,9475,9476, # 9264
9477,4900,9478,9479,9480,9481,9482,9483,9484,9485,9486,9487,9488,3349,9489,9490, # 9280
9491,9492,9493,9494,9495,9496,9497,9498,9499,9500,9501,9502,9503,9504,9505,9506, # 9296
9507,9508,9509,9510,9511,9512,9513,9514,9515,9516,9517,9518,9519,9520,4901,9521, # 9312
9522,9523,9524,9525,9526,4902,9527,9528,9529,9530,9531,9532,9533,9534,9535,9536, # 9328
9537,9538,9539,9540,9541,9542,9543,9544,9545,9546,9547,9548,9549,9550,9551,9552, # 9344
9553,9554,9555,9556,9557,9558,9559,9560,9561,9562,9563,9564,9565,9566,9567,9568, # 9360
9569,9570,9571,9572,9573,9574,9575,9576,9577,9578,9579,9580,9581,9582,9583,9584, # 9376
3805,9585,9586,9587,9588,9589,9590,9591,9592,9593,9594,9595,9596,9597,9598,9599, # 9392
9600,9601,9602,4903,9603,9604,9605,9606,9607,4904,9608,9609,9610,9611,9612,9613, # 9408
9614,4905,9615,9616,9617,9618,9619,9620,9621,9622,9623,9624,9625,9626,9627,9628, # 9424
9629,9630,9631,9632,4906,9633,9634,9635,9636,9637,9638,9639,9640,9641,9642,9643, # 9440
4907,9644,9645,9646,9647,9648,9649,9650,9651,9652,9653,9654,9655,9656,9657,9658, # 9456
9659,9660,9661,9662,9663,9664,9665,9666,9667,9668,9669,9670,9671,9672,4183,9673, # 9472
9674,9675,9676,9677,4908,9678,9679,9680,9681,4909,9682,9683,9684,9685,9686,9687, # 9488
9688,9689,9690,4910,9691,9692,9693,3675,9694,9695,9696,2945,9697,9698,9699,9700, # 9504
9701,9702,9703,9704,9705,4911,9706,9707,9708,9709,9710,9711,9712,9713,9714,9715, # 9520
9716,9717,9718,9719,9720,9721,9722,9723,9724,9725,9726,9727,9728,9729,9730,9731, # 9536
9732,9733,9734,9735,4912,9736,9737,9738,9739,9740,4913,9741,9742,9743,9744,9745, # 9552
9746,9747,9748,9749,9750,9751,9752,9753,9754,9755,9756,9757,9758,4914,9759,9760, # 9568
9761,9762,9763,9764,9765,9766,9767,9768,9769,9770,9771,9772,9773,9774,9775,9776, # 9584
9777,9778,9779,9780,9781,9782,4915,9783,9784,9785,9786,9787,9788,9789,9790,9791, # 9600
9792,9793,4916,9794,9795,9796,9797,9798,9799,9800,9801,9802,9803,9804,9805,9806, # 9616
9807,9808,9809,9810,9811,9812,9813,9814,9815,9816,9817,9818,9819,9820,9821,9822, # 9632
9823,9824,9825,9826,9827,9828,9829,9830,9831,9832,9833,9834,9835,9836,9837,9838, # 9648
9839,9840,9841,9842,9843,9844,9845,9846,9847,9848,9849,9850,9851,9852,9853,9854, # 9664
9855,9856,9857,9858,9859,9860,9861,9862,9863,9864,9865,9866,9867,9868,4917,9869, # 9680
9870,9871,9872,9873,9874,9875,9876,9877,9878,9879,9880,9881,9882,9883,9884,9885, # 9696
9886,9887,9888,9889,9890,9891,9892,4472,9893,9894,9895,9896,9897,3806,9898,9899, # 9712
9900,9901,9902,9903,9904,9905,9906,9907,9908,9909,9910,9911,9912,9913,9914,4918, # 9728
9915,9916,9917,4919,9918,9919,9920,9921,4184,9922,9923,9924,9925,9926,9927,9928, # 9744
9929,9930,9931,9932,9933,9934,9935,9936,9937,9938,9939,9940,9941,9942,9943,9944, # 9760
9945,9946,4920,9947,9948,9949,9950,9951,9952,9953,9954,9955,4185,9956,9957,9958, # 9776
9959,9960,9961,9962,9963,9964,9965,4921,9966,9967,9968,4473,9969,9970,9971,9972, # 9792
9973,9974,9975,9976,9977,4474,9978,9979,9980,9981,9982,9983,9984,9985,9986,9987, # 9808
9988,9989,9990,9991,9992,9993,9994,9995,9996,9997,9998,9999,10000,10001,10002,10003, # 9824
10004,10005,10006,10007,10008,10009,10010,10011,10012,10013,10014,10015,10016,10017,10018,10019, # 9840
10020,10021,4922,10022,4923,10023,10024,10025,10026,10027,10028,10029,10030,10031,10032,10033, # 9856
10034,10035,10036,10037,10038,10039,10040,10041,10042,10043,10044,10045,10046,10047,10048,4924, # 9872
10049,10050,10051,10052,10053,10054,10055,10056,10057,10058,10059,10060,10061,10062,10063,10064, # 9888
10065,10066,10067,10068,10069,10070,10071,10072,10073,10074,10075,10076,10077,10078,10079,10080, # 9904
10081,10082,10083,10084,10085,10086,10087,4475,10088,10089,10090,10091,10092,10093,10094,10095, # 9920
10096,10097,4476,10098,10099,10100,10101,10102,10103,10104,10105,10106,10107,10108,10109,10110, # 9936
10111,2174,10112,10113,10114,10115,10116,10117,10118,10119,10120,10121,10122,10123,10124,10125, # 9952
10126,10127,10128,10129,10130,10131,10132,10133,10134,10135,10136,10137,10138,10139,10140,3807, # 9968
4186,4925,10141,10142,10143,10144,10145,10146,10147,4477,4187,10148,10149,10150,10151,10152, # 9984
10153,4188,10154,10155,10156,10157,10158,10159,10160,10161,4926,10162,10163,10164,10165,10166, #10000
10167,10168,10169,10170,10171,10172,10173,10174,10175,10176,10177,10178,10179,10180,10181,10182, #10016
10183,10184,10185,10186,10187,10188,10189,10190,10191,10192,3203,10193,10194,10195,10196,10197, #10032
10198,10199,10200,4478,10201,10202,10203,10204,4479,10205,10206,10207,10208,10209,10210,10211, #10048
10212,10213,10214,10215,10216,10217,10218,10219,10220,10221,10222,10223,10224,10225,10226,10227, #10064
10228,10229,10230,10231,10232,10233,10234,4927,10235,10236,10237,10238,10239,10240,10241,10242, #10080
10243,10244,10245,10246,10247,10248,10249,10250,10251,10252,10253,10254,10255,10256,10257,10258, #10096
10259,10260,10261,10262,10263,10264,10265,10266,10267,10268,10269,10270,10271,10272,10273,4480, #10112
4928,4929,10274,10275,10276,10277,10278,10279,10280,10281,10282,10283,10284,10285,10286,10287, #10128
10288,10289,10290,10291,10292,10293,10294,10295,10296,10297,10298,10299,10300,10301,10302,10303, #10144
10304,10305,10306,10307,10308,10309,10310,10311,10312,10313,10314,10315,10316,10317,10318,10319, #10160
10320,10321,10322,10323,10324,10325,10326,10327,10328,10329,10330,10331,10332,10333,10334,4930, #10176
10335,10336,10337,10338,10339,10340,10341,10342,4931,10343,10344,10345,10346,10347,10348,10349, #10192
10350,10351,10352,10353,10354,10355,3088,10356,2786,10357,10358,10359,10360,4189,10361,10362, #10208
10363,10364,10365,10366,10367,10368,10369,10370,10371,10372,10373,10374,10375,4932,10376,10377, #10224
10378,10379,10380,10381,10382,10383,10384,10385,10386,10387,10388,10389,10390,10391,10392,4933, #10240
10393,10394,10395,4934,10396,10397,10398,10399,10400,10401,10402,10403,10404,10405,10406,10407, #10256
10408,10409,10410,10411,10412,3446,10413,10414,10415,10416,10417,10418,10419,10420,10421,10422, #10272
10423,4935,10424,10425,10426,10427,10428,10429,10430,4936,10431,10432,10433,10434,10435,10436, #10288
10437,10438,10439,10440,10441,10442,10443,4937,10444,10445,10446,10447,4481,10448,10449,10450, #10304
10451,10452,10453,10454,10455,10456,10457,10458,10459,10460,10461,10462,10463,10464,10465,10466, #10320
10467,10468,10469,10470,10471,10472,10473,10474,10475,10476,10477,10478,10479,10480,10481,10482, #10336
10483,10484,10485,10486,10487,10488,10489,10490,10491,10492,10493,10494,10495,10496,10497,10498, #10352
10499,10500,10501,10502,10503,10504,10505,4938,10506,10507,10508,10509,10510,2552,10511,10512, #10368
10513,10514,10515,10516,3447,10517,10518,10519,10520,10521,10522,10523,10524,10525,10526,10527, #10384
10528,10529,10530,10531,10532,10533,10534,10535,10536,10537,10538,10539,10540,10541,10542,10543, #10400
4482,10544,4939,10545,10546,10547,10548,10549,10550,10551,10552,10553,10554,10555,10556,10557, #10416
10558,10559,10560,10561,10562,10563,10564,10565,10566,10567,3676,4483,10568,10569,10570,10571, #10432
10572,3448,10573,10574,10575,10576,10577,10578,10579,10580,10581,10582,10583,10584,10585,10586, #10448
10587,10588,10589,10590,10591,10592,10593,10594,10595,10596,10597,10598,10599,10600,10601,10602, #10464
10603,10604,10605,10606,10607,10608,10609,10610,10611,10612,10613,10614,10615,10616,10617,10618, #10480
10619,10620,10621,10622,10623,10624,10625,10626,10627,4484,10628,10629,10630,10631,10632,4940, #10496
10633,10634,10635,10636,10637,10638,10639,10640,10641,10642,10643,10644,10645,10646,10647,10648, #10512
10649,10650,10651,10652,10653,10654,10655,10656,4941,10657,10658,10659,2599,10660,10661,10662, #10528
10663,10664,10665,10666,3089,10667,10668,10669,10670,10671,10672,10673,10674,10675,10676,10677, #10544
10678,10679,10680,4942,10681,10682,10683,10684,10685,10686,10687,10688,10689,10690,10691,10692, #10560
10693,10694,10695,10696,10697,4485,10698,10699,10700,10701,10702,10703,10704,4943,10705,3677, #10576
10706,10707,10708,10709,10710,10711,10712,4944,10713,10714,10715,10716,10717,10718,10719,10720, #10592
10721,10722,10723,10724,10725,10726,10727,10728,4945,10729,10730,10731,10732,10733,10734,10735, #10608
10736,10737,10738,10739,10740,10741,10742,10743,10744,10745,10746,10747,10748,10749,10750,10751, #10624
10752,10753,10754,10755,10756,10757,10758,10759,10760,10761,4946,10762,10763,10764,10765,10766, #10640
10767,4947,4948,10768,10769,10770,10771,10772,10773,10774,10775,10776,10777,10778,10779,10780, #10656
10781,10782,10783,10784,10785,10786,10787,10788,10789,10790,10791,10792,10793,10794,10795,10796, #10672
10797,10798,10799,10800,10801,10802,10803,10804,10805,10806,10807,10808,10809,10810,10811,10812, #10688
10813,10814,10815,10816,10817,10818,10819,10820,10821,10822,10823,10824,10825,10826,10827,10828, #10704
10829,10830,10831,10832,10833,10834,10835,10836,10837,10838,10839,10840,10841,10842,10843,10844, #10720
10845,10846,10847,10848,10849,10850,10851,10852,10853,10854,10855,10856,10857,10858,10859,10860, #10736
10861,10862,10863,10864,10865,10866,10867,10868,10869,10870,10871,10872,10873,10874,10875,10876, #10752
10877,10878,4486,10879,10880,10881,10882,10883,10884,10885,4949,10886,10887,10888,10889,10890, #10768
10891,10892,10893,10894,10895,10896,10897,10898,10899,10900,10901,10902,10903,10904,10905,10906, #10784
10907,10908,10909,10910,10911,10912,10913,10914,10915,10916,10917,10918,10919,4487,10920,10921, #10800
10922,10923,10924,10925,10926,10927,10928,10929,10930,10931,10932,4950,10933,10934,10935,10936, #10816
10937,10938,10939,10940,10941,10942,10943,10944,10945,10946,10947,10948,10949,4488,10950,10951, #10832
10952,10953,10954,10955,10956,10957,10958,10959,4190,10960,10961,10962,10963,10964,10965,10966, #10848
10967,10968,10969,10970,10971,10972,10973,10974,10975,10976,10977,10978,10979,10980,10981,10982, #10864
10983,10984,10985,10986,10987,10988,10989,10990,10991,10992,10993,10994,10995,10996,10997,10998, #10880
10999,11000,11001,11002,11003,11004,11005,11006,3960,11007,11008,11009,11010,11011,11012,11013, #10896
11014,11015,11016,11017,11018,11019,11020,11021,11022,11023,11024,11025,11026,11027,11028,11029, #10912
11030,11031,11032,4951,11033,11034,11035,11036,11037,11038,11039,11040,11041,11042,11043,11044, #10928
11045,11046,11047,4489,11048,11049,11050,11051,4952,11052,11053,11054,11055,11056,11057,11058, #10944
4953,11059,11060,11061,11062,11063,11064,11065,11066,11067,11068,11069,11070,11071,4954,11072, #10960
11073,11074,11075,11076,11077,11078,11079,11080,11081,11082,11083,11084,11085,11086,11087,11088, #10976
11089,11090,11091,11092,11093,11094,11095,11096,11097,11098,11099,11100,11101,11102,11103,11104, #10992
11105,11106,11107,11108,11109,11110,11111,11112,11113,11114,11115,3808,11116,11117,11118,11119, #11008
11120,11121,11122,11123,11124,11125,11126,11127,11128,11129,11130,11131,11132,11133,11134,4955, #11024
11135,11136,11137,11138,11139,11140,11141,11142,11143,11144,11145,11146,11147,11148,11149,11150, #11040
11151,11152,11153,11154,11155,11156,11157,11158,11159,11160,11161,4956,11162,11163,11164,11165, #11056
11166,11167,11168,11169,11170,11171,11172,11173,11174,11175,11176,11177,11178,11179,11180,4957, #11072
11181,11182,11183,11184,11185,11186,4958,11187,11188,11189,11190,11191,11192,11193,11194,11195, #11088
11196,11197,11198,11199,11200,3678,11201,11202,11203,11204,11205,11206,4191,11207,11208,11209, #11104
11210,11211,11212,11213,11214,11215,11216,11217,11218,11219,11220,11221,11222,11223,11224,11225, #11120
11226,11227,11228,11229,11230,11231,11232,11233,11234,11235,11236,11237,11238,11239,11240,11241, #11136
11242,11243,11244,11245,11246,11247,11248,11249,11250,11251,4959,11252,11253,11254,11255,11256, #11152
11257,11258,11259,11260,11261,11262,11263,11264,11265,11266,11267,11268,11269,11270,11271,11272, #11168
11273,11274,11275,11276,11277,11278,11279,11280,11281,11282,11283,11284,11285,11286,11287,11288, #11184
11289,11290,11291,11292,11293,11294,11295,11296,11297,11298,11299,11300,11301,11302,11303,11304, #11200
11305,11306,11307,11308,11309,11310,11311,11312,11313,11314,3679,11315,11316,11317,11318,4490, #11216
11319,11320,11321,11322,11323,11324,11325,11326,11327,11328,11329,11330,11331,11332,11333,11334, #11232
11335,11336,11337,11338,11339,11340,11341,11342,11343,11344,11345,11346,11347,4960,11348,11349, #11248
11350,11351,11352,11353,11354,11355,11356,11357,11358,11359,11360,11361,11362,11363,11364,11365, #11264
11366,11367,11368,11369,11370,11371,11372,11373,11374,11375,11376,11377,3961,4961,11378,11379, #11280
11380,11381,11382,11383,11384,11385,11386,11387,11388,11389,11390,11391,11392,11393,11394,11395, #11296
11396,11397,4192,11398,11399,11400,11401,11402,11403,11404,11405,11406,11407,11408,11409,11410, #11312
11411,4962,11412,11413,11414,11415,11416,11417,11418,11419,11420,11421,11422,11423,11424,11425, #11328
11426,11427,11428,11429,11430,11431,11432,11433,11434,11435,11436,11437,11438,11439,11440,11441, #11344
11442,11443,11444,11445,11446,11447,11448,11449,11450,11451,11452,11453,11454,11455,11456,11457, #11360
11458,11459,11460,11461,11462,11463,11464,11465,11466,11467,11468,11469,4963,11470,11471,4491, #11376
11472,11473,11474,11475,4964,11476,11477,11478,11479,11480,11481,11482,11483,11484,11485,11486, #11392
11487,11488,11489,11490,11491,11492,4965,11493,11494,11495,11496,11497,11498,11499,11500,11501, #11408
11502,11503,11504,11505,11506,11507,11508,11509,11510,11511,11512,11513,11514,11515,11516,11517, #11424
11518,11519,11520,11521,11522,11523,11524,11525,11526,11527,11528,11529,3962,11530,11531,11532, #11440
11533,11534,11535,11536,11537,11538,11539,11540,11541,11542,11543,11544,11545,11546,11547,11548, #11456
11549,11550,11551,11552,11553,11554,11555,11556,11557,11558,11559,11560,11561,11562,11563,11564, #11472
4193,4194,11565,11566,11567,11568,11569,11570,11571,11572,11573,11574,11575,11576,11577,11578, #11488
11579,11580,11581,11582,11583,11584,11585,11586,11587,11588,11589,11590,11591,4966,4195,11592, #11504
11593,11594,11595,11596,11597,11598,11599,11600,11601,11602,11603,11604,3090,11605,11606,11607, #11520
11608,11609,11610,4967,11611,11612,11613,11614,11615,11616,11617,11618,11619,11620,11621,11622, #11536
11623,11624,11625,11626,11627,11628,11629,11630,11631,11632,11633,11634,11635,11636,11637,11638, #11552
11639,11640,11641,11642,11643,11644,11645,11646,11647,11648,11649,11650,11651,11652,11653,11654, #11568
11655,11656,11657,11658,11659,11660,11661,11662,11663,11664,11665,11666,11667,11668,11669,11670, #11584
11671,11672,11673,11674,4968,11675,11676,11677,11678,11679,11680,11681,11682,11683,11684,11685, #11600
11686,11687,11688,11689,11690,11691,11692,11693,3809,11694,11695,11696,11697,11698,11699,11700, #11616
11701,11702,11703,11704,11705,11706,11707,11708,11709,11710,11711,11712,11713,11714,11715,11716, #11632
11717,11718,3553,11719,11720,11721,11722,11723,11724,11725,11726,11727,11728,11729,11730,4969, #11648
11731,11732,11733,11734,11735,11736,11737,11738,11739,11740,4492,11741,11742,11743,11744,11745, #11664
11746,11747,11748,11749,11750,11751,11752,4970,11753,11754,11755,11756,11757,11758,11759,11760, #11680
11761,11762,11763,11764,11765,11766,11767,11768,11769,11770,11771,11772,11773,11774,11775,11776, #11696
11777,11778,11779,11780,11781,11782,11783,11784,11785,11786,11787,11788,11789,11790,4971,11791, #11712
11792,11793,11794,11795,11796,11797,4972,11798,11799,11800,11801,11802,11803,11804,11805,11806, #11728
11807,11808,11809,11810,4973,11811,11812,11813,11814,11815,11816,11817,11818,11819,11820,11821, #11744
11822,11823,11824,11825,11826,11827,11828,11829,11830,11831,11832,11833,11834,3680,3810,11835, #11760
11836,4974,11837,11838,11839,11840,11841,11842,11843,11844,11845,11846,11847,11848,11849,11850, #11776
11851,11852,11853,11854,11855,11856,11857,11858,11859,11860,11861,11862,11863,11864,11865,11866, #11792
11867,11868,11869,11870,11871,11872,11873,11874,11875,11876,11877,11878,11879,11880,11881,11882, #11808
11883,11884,4493,11885,11886,11887,11888,11889,11890,11891,11892,11893,11894,11895,11896,11897, #11824
11898,11899,11900,11901,11902,11903,11904,11905,11906,11907,11908,11909,11910,11911,11912,11913, #11840
11914,11915,4975,11916,11917,11918,11919,11920,11921,11922,11923,11924,11925,11926,11927,11928, #11856
11929,11930,11931,11932,11933,11934,11935,11936,11937,11938,11939,11940,11941,11942,11943,11944, #11872
11945,11946,11947,11948,11949,4976,11950,11951,11952,11953,11954,11955,11956,11957,11958,11959, #11888
11960,11961,11962,11963,11964,11965,11966,11967,11968,11969,11970,11971,11972,11973,11974,11975, #11904
11976,11977,11978,11979,11980,11981,11982,11983,11984,11985,11986,11987,4196,11988,11989,11990, #11920
11991,11992,4977,11993,11994,11995,11996,11997,11998,11999,12000,12001,12002,12003,12004,12005, #11936
12006,12007,12008,12009,12010,12011,12012,12013,12014,12015,12016,12017,12018,12019,12020,12021, #11952
12022,12023,12024,12025,12026,12027,12028,12029,12030,12031,12032,12033,12034,12035,12036,12037, #11968
12038,12039,12040,12041,12042,12043,12044,12045,12046,12047,12048,12049,12050,12051,12052,12053, #11984
12054,12055,12056,12057,12058,12059,12060,12061,4978,12062,12063,12064,12065,12066,12067,12068, #12000
12069,12070,12071,12072,12073,12074,12075,12076,12077,12078,12079,12080,12081,12082,12083,12084, #12016
12085,12086,12087,12088,12089,12090,12091,12092,12093,12094,12095,12096,12097,12098,12099,12100, #12032
12101,12102,12103,12104,12105,12106,12107,12108,12109,12110,12111,12112,12113,12114,12115,12116, #12048
12117,12118,12119,12120,12121,12122,12123,4979,12124,12125,12126,12127,12128,4197,12129,12130, #12064
12131,12132,12133,12134,12135,12136,12137,12138,12139,12140,12141,12142,12143,12144,12145,12146, #12080
12147,12148,12149,12150,12151,12152,12153,12154,4980,12155,12156,12157,12158,12159,12160,4494, #12096
12161,12162,12163,12164,3811,12165,12166,12167,12168,12169,4495,12170,12171,4496,12172,12173, #12112
12174,12175,12176,3812,12177,12178,12179,12180,12181,12182,12183,12184,12185,12186,12187,12188, #12128
12189,12190,12191,12192,12193,12194,12195,12196,12197,12198,12199,12200,12201,12202,12203,12204, #12144
12205,12206,12207,12208,12209,12210,12211,12212,12213,12214,12215,12216,12217,12218,12219,12220, #12160
12221,4981,12222,12223,12224,12225,12226,12227,12228,12229,12230,12231,12232,12233,12234,12235, #12176
4982,12236,12237,12238,12239,12240,12241,12242,12243,12244,12245,4983,12246,12247,12248,12249, #12192
4984,12250,12251,12252,12253,12254,12255,12256,12257,12258,12259,12260,12261,12262,12263,12264, #12208
4985,12265,4497,12266,12267,12268,12269,12270,12271,12272,12273,12274,12275,12276,12277,12278, #12224
12279,12280,12281,12282,12283,12284,12285,12286,12287,4986,12288,12289,12290,12291,12292,12293, #12240
12294,12295,12296,2473,12297,12298,12299,12300,12301,12302,12303,12304,12305,12306,12307,12308, #12256
12309,12310,12311,12312,12313,12314,12315,12316,12317,12318,12319,3963,12320,12321,12322,12323, #12272
12324,12325,12326,12327,12328,12329,12330,12331,12332,4987,12333,12334,12335,12336,12337,12338, #12288
12339,12340,12341,12342,12343,12344,12345,12346,12347,12348,12349,12350,12351,12352,12353,12354, #12304
12355,12356,12357,12358,12359,3964,12360,12361,12362,12363,12364,12365,12366,12367,12368,12369, #12320
12370,3965,12371,12372,12373,12374,12375,12376,12377,12378,12379,12380,12381,12382,12383,12384, #12336
12385,12386,12387,12388,12389,12390,12391,12392,12393,12394,12395,12396,12397,12398,12399,12400, #12352
12401,12402,12403,12404,12405,12406,12407,12408,4988,12409,12410,12411,12412,12413,12414,12415, #12368
12416,12417,12418,12419,12420,12421,12422,12423,12424,12425,12426,12427,12428,12429,12430,12431, #12384
12432,12433,12434,12435,12436,12437,12438,3554,12439,12440,12441,12442,12443,12444,12445,12446, #12400
12447,12448,12449,12450,12451,12452,12453,12454,12455,12456,12457,12458,12459,12460,12461,12462, #12416
12463,12464,4989,12465,12466,12467,12468,12469,12470,12471,12472,12473,12474,12475,12476,12477, #12432
12478,12479,12480,4990,12481,12482,12483,12484,12485,12486,12487,12488,12489,4498,12490,12491, #12448
12492,12493,12494,12495,12496,12497,12498,12499,12500,12501,12502,12503,12504,12505,12506,12507, #12464
12508,12509,12510,12511,12512,12513,12514,12515,12516,12517,12518,12519,12520,12521,12522,12523, #12480
12524,12525,12526,12527,12528,12529,12530,12531,12532,12533,12534,12535,12536,12537,12538,12539, #12496
12540,12541,12542,12543,12544,12545,12546,12547,12548,12549,12550,12551,4991,12552,12553,12554, #12512
12555,12556,12557,12558,12559,12560,12561,12562,12563,12564,12565,12566,12567,12568,12569,12570, #12528
12571,12572,12573,12574,12575,12576,12577,12578,3036,12579,12580,12581,12582,12583,3966,12584, #12544
12585,12586,12587,12588,12589,12590,12591,12592,12593,12594,12595,12596,12597,12598,12599,12600, #12560
12601,12602,12603,12604,12605,12606,12607,12608,12609,12610,12611,12612,12613,12614,12615,12616, #12576
12617,12618,12619,12620,12621,12622,12623,12624,12625,12626,12627,12628,12629,12630,12631,12632, #12592
12633,12634,12635,12636,12637,12638,12639,12640,12641,12642,12643,12644,12645,12646,4499,12647, #12608
12648,12649,12650,12651,12652,12653,12654,12655,12656,12657,12658,12659,12660,12661,12662,12663, #12624
12664,12665,12666,12667,12668,12669,12670,12671,12672,12673,12674,12675,12676,12677,12678,12679, #12640
12680,12681,12682,12683,12684,12685,12686,12687,12688,12689,12690,12691,12692,12693,12694,12695, #12656
12696,12697,12698,4992,12699,12700,12701,12702,12703,12704,12705,12706,12707,12708,12709,12710, #12672
12711,12712,12713,12714,12715,12716,12717,12718,12719,12720,12721,12722,12723,12724,12725,12726, #12688
12727,12728,12729,12730,12731,12732,12733,12734,12735,12736,12737,12738,12739,12740,12741,12742, #12704
12743,12744,12745,12746,12747,12748,12749,12750,12751,12752,12753,12754,12755,12756,12757,12758, #12720
12759,12760,12761,12762,12763,12764,12765,12766,12767,12768,12769,12770,12771,12772,12773,12774, #12736
12775,12776,12777,12778,4993,2175,12779,12780,12781,12782,12783,12784,12785,12786,4500,12787, #12752
12788,12789,12790,12791,12792,12793,12794,12795,12796,12797,12798,12799,12800,12801,12802,12803, #12768
12804,12805,12806,12807,12808,12809,12810,12811,12812,12813,12814,12815,12816,12817,12818,12819, #12784
12820,12821,12822,12823,12824,12825,12826,4198,3967,12827,12828,12829,12830,12831,12832,12833, #12800
12834,12835,12836,12837,12838,12839,12840,12841,12842,12843,12844,12845,12846,12847,12848,12849, #12816
12850,12851,12852,12853,12854,12855,12856,12857,12858,12859,12860,12861,4199,12862,12863,12864, #12832
12865,12866,12867,12868,12869,12870,12871,12872,12873,12874,12875,12876,12877,12878,12879,12880, #12848
12881,12882,12883,12884,12885,12886,12887,4501,12888,12889,12890,12891,12892,12893,12894,12895, #12864
12896,12897,12898,12899,12900,12901,12902,12903,12904,12905,12906,12907,12908,12909,12910,12911, #12880
12912,4994,12913,12914,12915,12916,12917,12918,12919,12920,12921,12922,12923,12924,12925,12926, #12896
12927,12928,12929,12930,12931,12932,12933,12934,12935,12936,12937,12938,12939,12940,12941,12942, #12912
12943,12944,12945,12946,12947,12948,12949,12950,12951,12952,12953,12954,12955,12956,1772,12957, #12928
12958,12959,12960,12961,12962,12963,12964,12965,12966,12967,12968,12969,12970,12971,12972,12973, #12944
12974,12975,12976,12977,12978,12979,12980,12981,12982,12983,12984,12985,12986,12987,12988,12989, #12960
12990,12991,12992,12993,12994,12995,12996,12997,4502,12998,4503,12999,13000,13001,13002,13003, #12976
4504,13004,13005,13006,13007,13008,13009,13010,13011,13012,13013,13014,13015,13016,13017,13018, #12992
13019,13020,13021,13022,13023,13024,13025,13026,13027,13028,13029,3449,13030,13031,13032,13033, #13008
13034,13035,13036,13037,13038,13039,13040,13041,13042,13043,13044,13045,13046,13047,13048,13049, #13024
13050,13051,13052,13053,13054,13055,13056,13057,13058,13059,13060,13061,13062,13063,13064,13065, #13040
13066,13067,13068,13069,13070,13071,13072,13073,13074,13075,13076,13077,13078,13079,13080,13081, #13056
13082,13083,13084,13085,13086,13087,13088,13089,13090,13091,13092,13093,13094,13095,13096,13097, #13072
13098,13099,13100,13101,13102,13103,13104,13105,13106,13107,13108,13109,13110,13111,13112,13113, #13088
13114,13115,13116,13117,13118,3968,13119,4995,13120,13121,13122,13123,13124,13125,13126,13127, #13104
4505,13128,13129,13130,13131,13132,13133,13134,4996,4506,13135,13136,13137,13138,13139,4997, #13120
13140,13141,13142,13143,13144,13145,13146,13147,13148,13149,13150,13151,13152,13153,13154,13155, #13136
13156,13157,13158,13159,4998,13160,13161,13162,13163,13164,13165,13166,13167,13168,13169,13170, #13152
13171,13172,13173,13174,13175,13176,4999,13177,13178,13179,13180,13181,13182,13183,13184,13185, #13168
13186,13187,13188,13189,13190,13191,13192,13193,13194,13195,13196,13197,13198,13199,13200,13201, #13184
13202,13203,13204,13205,13206,5000,13207,13208,13209,13210,13211,13212,13213,13214,13215,13216, #13200
13217,13218,13219,13220,13221,13222,13223,13224,13225,13226,13227,4200,5001,13228,13229,13230, #13216
13231,13232,13233,13234,13235,13236,13237,13238,13239,13240,3969,13241,13242,13243,13244,3970, #13232
13245,13246,13247,13248,13249,13250,13251,13252,13253,13254,13255,13256,13257,13258,13259,13260, #13248
13261,13262,13263,13264,13265,13266,13267,13268,3450,13269,13270,13271,13272,13273,13274,13275, #13264
13276,5002,13277,13278,13279,13280,13281,13282,13283,13284,13285,13286,13287,13288,13289,13290, #13280
13291,13292,13293,13294,13295,13296,13297,13298,13299,13300,13301,13302,3813,13303,13304,13305, #13296
13306,13307,13308,13309,13310,13311,13312,13313,13314,13315,13316,13317,13318,13319,13320,13321, #13312
13322,13323,13324,13325,13326,13327,13328,4507,13329,13330,13331,13332,13333,13334,13335,13336, #13328
13337,13338,13339,13340,13341,5003,13342,13343,13344,13345,13346,13347,13348,13349,13350,13351, #13344
13352,13353,13354,13355,13356,13357,13358,13359,13360,13361,13362,13363,13364,13365,13366,13367, #13360
5004,13368,13369,13370,13371,13372,13373,13374,13375,13376,13377,13378,13379,13380,13381,13382, #13376
13383,13384,13385,13386,13387,13388,13389,13390,13391,13392,13393,13394,13395,13396,13397,13398, #13392
13399,13400,13401,13402,13403,13404,13405,13406,13407,13408,13409,13410,13411,13412,13413,13414, #13408
13415,13416,13417,13418,13419,13420,13421,13422,13423,13424,13425,13426,13427,13428,13429,13430, #13424
13431,13432,4508,13433,13434,13435,4201,13436,13437,13438,13439,13440,13441,13442,13443,13444, #13440
13445,13446,13447,13448,13449,13450,13451,13452,13453,13454,13455,13456,13457,5005,13458,13459, #13456
13460,13461,13462,13463,13464,13465,13466,13467,13468,13469,13470,4509,13471,13472,13473,13474, #13472
13475,13476,13477,13478,13479,13480,13481,13482,13483,13484,13485,13486,13487,13488,13489,13490, #13488
13491,13492,13493,13494,13495,13496,13497,13498,13499,13500,13501,13502,13503,13504,13505,13506, #13504
13507,13508,13509,13510,13511,13512,13513,13514,13515,13516,13517,13518,13519,13520,13521,13522, #13520
13523,13524,13525,13526,13527,13528,13529,13530,13531,13532,13533,13534,13535,13536,13537,13538, #13536
13539,13540,13541,13542,13543,13544,13545,13546,13547,13548,13549,13550,13551,13552,13553,13554, #13552
13555,13556,13557,13558,13559,13560,13561,13562,13563,13564,13565,13566,13567,13568,13569,13570, #13568
13571,13572,13573,13574,13575,13576,13577,13578,13579,13580,13581,13582,13583,13584,13585,13586, #13584
13587,13588,13589,13590,13591,13592,13593,13594,13595,13596,13597,13598,13599,13600,13601,13602, #13600
13603,13604,13605,13606,13607,13608,13609,13610,13611,13612,13613,13614,13615,13616,13617,13618, #13616
13619,13620,13621,13622,13623,13624,13625,13626,13627,13628,13629,13630,13631,13632,13633,13634, #13632
13635,13636,13637,13638,13639,13640,13641,13642,5006,13643,13644,13645,13646,13647,13648,13649, #13648
13650,13651,5007,13652,13653,13654,13655,13656,13657,13658,13659,13660,13661,13662,13663,13664, #13664
13665,13666,13667,13668,13669,13670,13671,13672,13673,13674,13675,13676,13677,13678,13679,13680, #13680
13681,13682,13683,13684,13685,13686,13687,13688,13689,13690,13691,13692,13693,13694,13695,13696, #13696
13697,13698,13699,13700,13701,13702,13703,13704,13705,13706,13707,13708,13709,13710,13711,13712, #13712
13713,13714,13715,13716,13717,13718,13719,13720,13721,13722,13723,13724,13725,13726,13727,13728, #13728
13729,13730,13731,13732,13733,13734,13735,13736,13737,13738,13739,13740,13741,13742,13743,13744, #13744
13745,13746,13747,13748,13749,13750,13751,13752,13753,13754,13755,13756,13757,13758,13759,13760, #13760
13761,13762,13763,13764,13765,13766,13767,13768,13769,13770,13771,13772,13773,13774,3273,13775, #13776
13776,13777,13778,13779,13780,13781,13782,13783,13784,13785,13786,13787,13788,13789,13790,13791, #13792
13792,13793,13794,13795,13796,13797,13798,13799,13800,13801,13802,13803,13804,13805,13806,13807, #13808
13808,13809,13810,13811,13812,13813,13814,13815,13816,13817,13818,13819,13820,13821,13822,13823, #13824
13824,13825,13826,13827,13828,13829,13830,13831,13832,13833,13834,13835,13836,13837,13838,13839, #13840
13840,13841,13842,13843,13844,13845,13846,13847,13848,13849,13850,13851,13852,13853,13854,13855, #13856
13856,13857,13858,13859,13860,13861,13862,13863,13864,13865,13866,13867,13868,13869,13870,13871, #13872
13872,13873,13874,13875,13876,13877,13878,13879,13880,13881,13882,13883,13884,13885,13886,13887, #13888
13888,13889,13890,13891,13892,13893,13894,13895,13896,13897,13898,13899,13900,13901,13902,13903, #13904
13904,13905,13906,13907,13908,13909,13910,13911,13912,13913,13914,13915,13916,13917,13918,13919, #13920
13920,13921,13922,13923,13924,13925,13926,13927,13928,13929,13930,13931,13932,13933,13934,13935, #13936
13936,13937,13938,13939,13940,13941,13942,13943,13944,13945,13946,13947,13948,13949,13950,13951, #13952
13952,13953,13954,13955,13956,13957,13958,13959,13960,13961,13962,13963,13964,13965,13966,13967, #13968
13968,13969,13970,13971,13972) #13973
# flake8: noqa
| lgpl-3.0 |
ewandor/home-assistant | homeassistant/components/climate/netatmo.py | 1 | 5912 | """
Support for Netatmo Smart Thermostat.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.netatmo/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.const import TEMP_CELSIUS, ATTR_TEMPERATURE
from homeassistant.components.climate import (
STATE_HEAT, STATE_IDLE, ClimateDevice, PLATFORM_SCHEMA,
SUPPORT_TARGET_TEMPERATURE, SUPPORT_OPERATION_MODE, SUPPORT_AWAY_MODE)
from homeassistant.util import Throttle
from homeassistant.loader import get_component
import homeassistant.helpers.config_validation as cv
DEPENDENCIES = ['netatmo']
_LOGGER = logging.getLogger(__name__)
CONF_RELAY = 'relay'
CONF_THERMOSTAT = 'thermostat'
DEFAULT_AWAY_TEMPERATURE = 14
# The default offset is 2 hours (when you use the thermostat itself)
DEFAULT_TIME_OFFSET = 7200
# Return cached results if the last scan was less than this time ago
# NetAtmo data is uploaded to the server every hour
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=300)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_RELAY): cv.string,
vol.Optional(CONF_THERMOSTAT, default=[]):
vol.All(cv.ensure_list, [cv.string]),
})
SUPPORT_FLAGS = (SUPPORT_TARGET_TEMPERATURE | SUPPORT_OPERATION_MODE |
SUPPORT_AWAY_MODE)
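# Illustrative note (not part of the original file): SUPPORT_FLAGS is a
# bitmask, so callers can test individual capabilities with bitwise AND.
# A minimal sketch of such a check (hypothetical caller code):
#   if thermostat.supported_features & SUPPORT_AWAY_MODE:
#       thermostat.turn_away_mode_on()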
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the NetAtmo Thermostat."""
netatmo = get_component('netatmo')
device = config.get(CONF_RELAY)
import lnetatmo
try:
data = ThermostatData(netatmo.NETATMO_AUTH, device)
for module_name in data.get_module_names():
if CONF_THERMOSTAT in config:
if config[CONF_THERMOSTAT] != [] and \
module_name not in config[CONF_THERMOSTAT]:
continue
add_devices([NetatmoThermostat(data, module_name)], True)
except lnetatmo.NoDevice:
return None
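# Illustrative configuration sketch (not part of the original file): a
# matching configuration.yaml entry might look like the following, where
# "MyRelay" and "Living Room" are placeholder names:
#   climate:
#     - platform: netatmo
#       relay: MyRelay
#       thermostat:
#         - Living Room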
class NetatmoThermostat(ClimateDevice):
"""Representation a Netatmo thermostat."""
def __init__(self, data, module_name, away_temp=None):
"""Initialize the sensor."""
self._data = data
self._state = None
self._name = module_name
self._target_temperature = None
self._away = None
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._target_temperature
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
return self._data.current_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temperature
@property
def current_operation(self):
"""Return the current state of the thermostat."""
state = self._data.thermostatdata.relay_cmd
if state == 0:
return STATE_IDLE
elif state == 100:
return STATE_HEAT
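# Note (added comment): relay_cmd is expected to report 0 (idle) or 100
# (heating); any other value falls through the branches above, so the
# property implicitly returns None.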
@property
def is_away_mode_on(self):
"""Return true if away mode is on."""
return self._away
def turn_away_mode_on(self):
"""Turn away on."""
mode = "away"
temp = None
self._data.thermostatdata.setthermpoint(mode, temp, endTimeOffset=None)
self._away = True
def turn_away_mode_off(self):
"""Turn away off."""
mode = "program"
temp = None
self._data.thermostatdata.setthermpoint(mode, temp, endTimeOffset=None)
self._away = False
def set_temperature(self, **kwargs):
"""Set new target temperature for 2 hours."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
mode = "manual"
self._data.thermostatdata.setthermpoint(
mode, temperature, DEFAULT_TIME_OFFSET)
self._target_temperature = temperature
self._away = False
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from NetAtmo API and updates the states."""
self._data.update()
self._target_temperature = self._data.thermostatdata.setpoint_temp
self._away = self._data.setpoint_mode == 'away'
class ThermostatData(object):
"""Get the latest data from Netatmo."""
def __init__(self, auth, device=None):
"""Initialize the data object."""
self.auth = auth
self.thermostatdata = None
self.module_names = []
self.device = device
self.current_temperature = None
self.target_temperature = None
self.setpoint_mode = None
def get_module_names(self):
"""Return all module available on the API as a list."""
self.update()
if not self.device:
for device in self.thermostatdata.modules:
for module in self.thermostatdata.modules[device].values():
self.module_names.append(module['module_name'])
else:
for module in self.thermostatdata.modules[self.device].values():
self.module_names.append(module['module_name'])
return self.module_names
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Call the NetAtmo API to update the data."""
import lnetatmo
self.thermostatdata = lnetatmo.ThermostatData(self.auth)
self.target_temperature = self.thermostatdata.setpoint_temp
self.setpoint_mode = self.thermostatdata.setpoint_mode
self.current_temperature = self.thermostatdata.temp
| apache-2.0 |
romain-li/edx-platform | pavelib/docs.py | 16 | 2005 | """
Open edX Documentation Builder
Ties into Sphinx to generate files at the specified location(s)
"""
from __future__ import print_function
import sys
from paver.easy import cmdopts, needs, sh, task
from .utils.timer import timed
DOC_PATHS = {
"dev": "docs/en_us/developers",
"author": "docs/en_us/course_authors",
"data": "docs/en_us/data",
"default": "docs/en_us"
}
def valid_doc_types():
"""
Return a comma-separated string of valid doc types.
"""
return ", ".join(DOC_PATHS.keys())
def doc_path(options, allow_default=True):
"""
Parse `options` (from the Paver task args) to determine the path
to the documentation directory.
If the specified path is not one of the valid options, print an error
message and exit.
If `allow_default` is False, then require that a type is specified,
and exit with an error message if it isn't.
"""
doc_type = getattr(options, 'type', 'default')
path = DOC_PATHS.get(doc_type)
if doc_type == 'default' and not allow_default:
print(
"You must specify a documentation type using '--type'. "
"Valid options are: {options}".format(
options=valid_doc_types()
)
)
sys.exit(1)
if path is None:
print(
"Invalid documentation type '{doc_type}'. "
"Valid options are: {options}".format(
doc_type=doc_type, options=valid_doc_types()
)
)
sys.exit(1)
else:
return path
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
("type=", "t", "Type of docs to compile"),
("verbose", "v", "Display verbose output"),
])
@timed
def build_docs(options):
"""
Invoke sphinx 'make build' to generate docs.
"""
verbose = getattr(options, 'verbose', False)
cmd = "cd {dir}; make html quiet={quiet}".format(
dir=doc_path(options),
quiet="false" if verbose else "true"
)
sh(cmd)
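# Illustrative usage (not part of the original file): given the cmdopts
# above, this task can be invoked from the shell roughly as
#   paver build_docs --type=dev --verbose
# where "dev" must be one of the keys of DOC_PATHS.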
| agpl-3.0 |
geokala/cloudify-manager | rest-service/manager_rest/blueprints_manager.py | 1 | 39060 | #########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import uuid
import traceback
from datetime import datetime
from StringIO import StringIO
from flask import g, current_app
from dsl_parser import exceptions as parser_exceptions
from dsl_parser import functions
from dsl_parser import tasks
from dsl_parser.constants import DEPLOYMENT_PLUGINS_TO_INSTALL
from manager_rest import models
from manager_rest import manager_exceptions
from manager_rest.workflow_client import workflow_client
from manager_rest.storage_manager import get_storage_manager
from manager_rest.utils import maybe_register_teardown
LIMITLESS_GLOBAL_PARALLEL_EXECUTIONS_VALUE = -1
class DslParseException(Exception):
pass
class BlueprintAlreadyExistsException(Exception):
def __init__(self, blueprint_id, *args):
Exception.__init__(self, *args)  # unpack args so the message reads correctly
self.blueprint_id = blueprint_id
class BlueprintsManager(object):
@property
def sm(self):
return get_storage_manager()
def blueprints_list(self, include=None):
return self.sm.blueprints_list(include=include)
def deployments_list(self, include=None):
return self.sm.deployments_list(include=include)
def executions_list(self, deployment_id=None,
is_include_system_workflows=False, include=None):
executions = self.sm.executions_list(deployment_id=deployment_id,
include=include)
return [e for e in executions if
is_include_system_workflows or not e.is_system_workflow]
def get_blueprint(self, blueprint_id, include=None):
return self.sm.get_blueprint(blueprint_id, include=include)
def get_deployment(self, deployment_id, include=None):
return self.sm.get_deployment(deployment_id=deployment_id,
include=include)
def get_execution(self, execution_id, include=None):
return self.sm.get_execution(execution_id, include=include)
def update_execution_status(self, execution_id, status, error):
if self._get_transient_deployment_workers_mode_config()['enabled'] and\
status in models.Execution.END_STATES:
execution = self.get_execution(execution_id)
# currently, the create and delete deployment environment
# workflows are still not marked as system workflows, so they're
# named explicitly
if not (execution.is_system_workflow or execution.workflow_id in (
'create_deployment_environment',
'delete_deployment_environment')):
# a user workflow has reached a final state - initiating
# a workflow to stop deployment workers
deployment = self.get_deployment(execution.deployment_id,
include=['id'])
wf_id = '_stop_deployment_environment'
deployment_env_creation_task_name = \
'cloudify_system_workflows.deployment_environment.stop'
kwargs = {
'prerequisite_task_id': execution_id
}
self._execute_system_workflow(
deployment, wf_id, deployment_env_creation_task_name,
kwargs)
return self.sm.update_execution_status(execution_id, status, error)
def publish_blueprint(self, dsl_location,
resources_base_url, blueprint_id):
try:
plan = tasks.parse_dsl(dsl_location, resources_base_url)
except Exception, ex:
raise DslParseException(str(ex))
now = str(datetime.now())
new_blueprint = models.BlueprintState(plan=plan,
id=blueprint_id,
created_at=now,
updated_at=now)
self.sm.put_blueprint(new_blueprint.id, new_blueprint)
return new_blueprint
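# Illustrative usage sketch (not part of the original file; the DSL
# location, resources URL and blueprint id are placeholders):
#   manager = get_blueprints_manager()
#   blueprint = manager.publish_blueprint(
#       'file:///tmp/blueprint.yaml', 'http://manager/resources', 'bp-1')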
def delete_blueprint(self, blueprint_id):
blueprint_deployments = self.sm.get_blueprint_deployments(blueprint_id)
if len(blueprint_deployments) > 0:
raise manager_exceptions.DependentExistsError(
"Can't delete blueprint {0} - There exist "
"deployments for this blueprint; Deployments ids: {1}"
.format(blueprint_id,
','.join([dep.id for dep
in blueprint_deployments])))
return self.sm.delete_blueprint(blueprint_id)
def delete_deployment(self, deployment_id, ignore_live_nodes=False):
# Verify deployment exists.
self.sm.get_deployment(deployment_id)
# validate there are no running executions for this deployment
executions = self.executions_list(deployment_id=deployment_id)
if any(execution.status not in models.Execution.END_STATES for
execution in executions):
raise manager_exceptions.DependentExistsError(
"Can't delete deployment {0} - There are running "
"executions for this deployment. Running executions ids: {1}"
.format(
deployment_id,
','.join([execution.id for execution in
executions if execution.status not
in models.Execution.END_STATES])))
if not ignore_live_nodes:
node_instances = self.sm.get_node_instances(
deployment_id=deployment_id)
# validate either all nodes for this deployment are still
# uninitialized or have been deleted
if any(node.state not in ('uninitialized', 'deleted') for node in
node_instances):
raise manager_exceptions.DependentExistsError(
"Can't delete deployment {0} - There are live nodes for "
"this deployment. Live nodes ids: {1}"
.format(deployment_id,
','.join([node.id for node in node_instances
if node.state not in
('uninitialized', 'deleted')])))
self._delete_deployment_environment(deployment_id)
return self.sm.delete_deployment(deployment_id)
def execute_workflow(self, deployment_id, workflow_id,
parameters=None,
allow_custom_parameters=False, force=False):
deployment = self.get_deployment(deployment_id)
if workflow_id not in deployment.workflows:
raise manager_exceptions.NonexistentWorkflowError(
'Workflow {0} does not exist in deployment {1}'.format(
workflow_id, deployment_id))
workflow = deployment.workflows[workflow_id]
self._verify_deployment_environment_created_successfully(deployment_id)
transient_workers_config =\
self._get_transient_deployment_workers_mode_config()
is_transient_workers_enabled = transient_workers_config['enabled']
self._check_for_active_executions(deployment_id, force,
transient_workers_config)
execution_parameters = \
BlueprintsManager._merge_and_validate_execution_parameters(
workflow, workflow_id, parameters, allow_custom_parameters)
if is_transient_workers_enabled:
# in this mode, we push the user execution object to storage
# before executing the "_start_deployment_environment" system
# workflow, to prevent from other executions to start running in
# between the system workflow and the user workflow execution.
# to keep correct chronological order, the system workflow's
# "created_at" field is generated here.
start_deployment_env_created_at_time = str(datetime.now())
execution_id = str(uuid.uuid4())
new_execution = models.Execution(
id=execution_id,
status=models.Execution.PENDING,
created_at=str(datetime.now()),
blueprint_id=deployment.blueprint_id,
workflow_id=workflow_id,
deployment_id=deployment_id,
error='',
parameters=self._get_only_user_execution_parameters(
execution_parameters),
is_system_workflow=False)
self.sm.put_execution(new_execution.id, new_execution)
if is_transient_workers_enabled:
# initiating a workflow to start deployment workers
wf_id = '_start_deployment_environment'
deployment_env_start_task_name = \
'cloudify_system_workflows.deployment_environment.start'
self._execute_system_workflow(
deployment, wf_id, deployment_env_start_task_name, timeout=300,
created_at=start_deployment_env_created_at_time)
# executing the user workflow
workflow_client().execute_workflow(
workflow_id,
workflow,
blueprint_id=deployment.blueprint_id,
deployment_id=deployment_id,
execution_id=execution_id,
execution_parameters=execution_parameters)
return new_execution
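# Illustrative usage sketch (not part of the original file; ids and the
# parameters dict are placeholders):
#   execution = get_blueprints_manager().execute_workflow(
#       'dep-1', 'install', parameters={'foo': 'bar'},
#       allow_custom_parameters=True)
#   # the returned execution starts in models.Execution.PENDING status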
def _execute_system_workflow(self, deployment, wf_id, task_mapping,
execution_parameters=None, timeout=0,
created_at=None):
"""
:param deployment: deployment for workflow execution
:param wf_id: workflow id
:param task_mapping: mapping to the system workflow
:param execution_parameters: parameters for the system workflow
:param timeout: 0 will return immediately; any positive value will
cause this method to wait for the given timeout for the task to
complete, and verify it finished successfully before returning
:param created_at: creation time for the workflow execution object.
if omitted, a value will be generated by this method.
:return: async task object
"""
execution_id = str(uuid.uuid4()) # will also serve as the task id
execution_parameters = execution_parameters or {}
# currently, deployment env creation/deletion are not set as
# system workflows
is_system_workflow = wf_id not in (
'create_deployment_environment', 'delete_deployment_environment')
execution = models.Execution(
id=execution_id,
status=models.Execution.PENDING,
created_at=created_at or str(datetime.now()),
blueprint_id=deployment.blueprint_id,
workflow_id=wf_id,
deployment_id=deployment.id,
error='',
parameters=self._get_only_user_execution_parameters(
execution_parameters),
is_system_workflow=is_system_workflow)
self.sm.put_execution(execution.id, execution)
async_task = workflow_client().execute_system_workflow(
deployment, wf_id, execution_id, task_mapping,
execution_parameters)
if timeout > 0:
try:
# wait for the workflow execution to complete
async_task.get(timeout=timeout, propagate=True)
except Exception as e:
# error message for the user
error_msg =\
'Error occurred while executing the {0} system workflow '\
'for deployment {1}: {2} - {3}'.format(
wf_id, deployment.id, type(e).__name__, str(e))
# adding traceback to the log error message
tb = StringIO()
traceback.print_exc(file=tb)
log_error_msg = '{0}; traceback: {1}'.format(
error_msg, tb.getvalue())
current_app.logger.error(log_error_msg)
raise RuntimeError(error_msg)
# verify the execution completed successfully
execution = self.sm.get_execution(async_task.id)
if execution.status != models.Execution.TERMINATED:
raise RuntimeError(
'Failed executing the {0} system workflow for deployment '
'{1}: Execution did not complete successfully before '
'timeout ({2} seconds)'.format(
wf_id, deployment.id, timeout))
return async_task
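# Example call (illustrative; mirrors _delete_deployment_environment below):
# block for up to 5 minutes and verify the workflow terminated successfully:
#   self._execute_system_workflow(
#       deployment, 'delete_deployment_environment', task_name, timeout=300)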
def cancel_execution(self, execution_id, force=False):
"""
Cancel an execution by its id
If force is False (default), this method will request the
executed workflow to gracefully terminate. It is up to the workflow
to follow up on that request.
If force is used, this method will request the abrupt and immediate
termination of the executed workflow. This is valid for all
workflows, regardless of whether they provide support for graceful
termination or not.
Note that in either case, the execution is not yet cancelled upon
returning from the method. Instead, it'll be in a 'cancelling' or
'force_cancelling' status (as can be seen in models.Execution). Once
the execution is truly stopped, it'll be in 'cancelled' status (unless
force was not used and the executed workflow doesn't support
graceful termination, in which case it might simply continue
regardless and end up with a 'terminated' status)
:param execution_id: The execution id
:param force: A boolean describing whether to force cancellation
:return: The updated execution object
:rtype: models.Execution
:raises manager_exceptions.IllegalActionError
"""
execution = self.get_execution(execution_id)
if execution.status not in (models.Execution.PENDING,
models.Execution.STARTED) and \
(not force or execution.status != models.Execution
.CANCELLING):
raise manager_exceptions.IllegalActionError(
"Can't {0}cancel execution {1} because it's in status {2}"
.format(
'force-' if force else '',
execution_id,
execution.status))
new_status = models.Execution.CANCELLING if not force \
else models.Execution.FORCE_CANCELLING
self.sm.update_execution_status(
execution_id, new_status, '')
return self.get_execution(execution_id)
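# Illustrative usage sketch (not part of the original file):
#   execution = manager.cancel_execution('exec-1')  # graceful request
#   # execution.status is now models.Execution.CANCELLING; calling again
#   # with force=True escalates it to FORCE_CANCELLING.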
def create_deployment(self, blueprint_id, deployment_id, inputs=None):
blueprint = self.get_blueprint(blueprint_id)
plan = blueprint.plan
try:
deployment_plan = tasks.prepare_deployment_plan(plan, inputs)
except parser_exceptions.MissingRequiredInputError, e:
raise manager_exceptions.MissingRequiredDeploymentInputError(
str(e))
except parser_exceptions.UnknownInputError, e:
raise manager_exceptions.UnknownDeploymentInputError(str(e))
now = str(datetime.now())
new_deployment = models.Deployment(
id=deployment_id,
blueprint_id=blueprint_id, created_at=now, updated_at=now,
workflows=deployment_plan['workflows'],
inputs=deployment_plan['inputs'],
policy_types=deployment_plan['policy_types'],
policy_triggers=deployment_plan['policy_triggers'],
groups=deployment_plan['groups'],
outputs=deployment_plan['outputs'])
self.sm.put_deployment(deployment_id, new_deployment)
self._create_deployment_nodes(blueprint_id,
deployment_id,
deployment_plan)
node_instances = deployment_plan['node_instances']
self._create_deployment_node_instances(deployment_id,
node_instances)
self._create_deployment_environment(new_deployment, deployment_plan)
return new_deployment
def start_deployment_modification(self,
deployment_id,
modified_nodes,
context):
# verify deployment exists
self.sm.get_deployment(deployment_id, include=['id'])
existing_modifications = self.sm.deployment_modifications_list(
deployment_id=deployment_id, include=['id', 'status'])
active_modifications = [
m.id for m in existing_modifications
if m.status == models.DeploymentModification.STARTED]
if active_modifications:
raise \
manager_exceptions.ExistingStartedDeploymentModificationError(
'Cannot start deployment modification while there are '
'existing started deployment modifications. Currently '
'started deployment modifications: {0}'
.format(active_modifications))
nodes = [node.to_dict() for node in self.sm.get_nodes(deployment_id)]
node_instances = [instance.to_dict() for instance
in self.sm.get_node_instances(deployment_id)]
node_instances_modification = tasks.modify_deployment(
nodes=nodes,
previous_node_instances=node_instances,
modified_nodes=modified_nodes)
node_instances_modification['before_modification'] = [
instance.to_dict() for instance in
self.sm.get_node_instances(deployment_id)]
now = str(datetime.now())
modification_id = str(uuid.uuid4())
modification = models.DeploymentModification(
id=modification_id,
created_at=now,
ended_at=None,
status=models.DeploymentModification.STARTED,
deployment_id=deployment_id,
modified_nodes=modified_nodes,
node_instances=node_instances_modification,
context=context)
self.sm.put_deployment_modification(modification_id, modification)
for node_id, modified_node in modified_nodes.items():
self.sm.update_node(
modification.deployment_id, node_id,
planned_number_of_instances=modified_node['instances'])
added_and_related = node_instances_modification['added_and_related']
added_node_instances = []
for node_instance in added_and_related:
if node_instance.get('modification') == 'added':
added_node_instances.append(node_instance)
else:
current = self.sm.get_node_instance(node_instance['id'])
new_relationships = current.relationships
new_relationships += node_instance['relationships']
self.sm.update_node_instance(models.DeploymentNodeInstance(
id=node_instance['id'],
relationships=new_relationships,
version=current.version,
node_id=None,
host_id=None,
deployment_id=None,
state=None,
runtime_properties=None))
self._create_deployment_node_instances(deployment_id,
added_node_instances)
return modification
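# Note on input shape (added comment): modified_nodes maps node ids to their
# new planned instance counts, e.g. (hypothetical values):
#   {'vm_node': {'instances': 3}}
# which this method records on each node and expands into added/removed
# node instances via tasks.modify_deployment.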
def finish_deployment_modification(self, modification_id):
modification = self.sm.get_deployment_modification(modification_id)
if modification.status in models.DeploymentModification.END_STATES:
raise manager_exceptions.DeploymentModificationAlreadyEndedError(
'Cannot finish deployment modification: {0}. It is already in'
' {1} status.'.format(modification_id,
modification.status))
modified_nodes = modification.modified_nodes
for node_id, modified_node in modified_nodes.items():
self.sm.update_node(modification.deployment_id, node_id,
number_of_instances=modified_node['instances'])
node_instances = modification.node_instances
for node_instance in node_instances['removed_and_related']:
if node_instance.get('modification') == 'removed':
self.sm.delete_node_instance(node_instance['id'])
else:
removed_relationship_target_ids = set(
[rel['target_id']
for rel in node_instance['relationships']])
current = self.sm.get_node_instance(node_instance['id'])
new_relationships = [rel for rel in current.relationships
if rel['target_id']
not in removed_relationship_target_ids]
self.sm.update_node_instance(models.DeploymentNodeInstance(
id=node_instance['id'],
relationships=new_relationships,
version=current.version,
node_id=None,
host_id=None,
deployment_id=None,
state=None,
runtime_properties=None))
now = str(datetime.now())
self.sm.update_deployment_modification(
models.DeploymentModification(
id=modification_id,
status=models.DeploymentModification.FINISHED,
ended_at=now,
created_at=None,
deployment_id=None,
modified_nodes=None,
node_instances=None,
context=None))
return models.DeploymentModification(
id=modification_id,
status=models.DeploymentModification.FINISHED,
ended_at=None,
created_at=None,
deployment_id=None,
modified_nodes=None,
node_instances=None,
context=None)
def rollback_deployment_modification(self, modification_id):
modification = self.sm.get_deployment_modification(modification_id)
if modification.status in models.DeploymentModification.END_STATES:
raise manager_exceptions.DeploymentModificationAlreadyEndedError(
'Cannot rollback deployment modification: {0}. It is already '
'in {1} status.'.format(modification_id,
modification.status))
node_instances = self.sm.get_node_instances(modification.deployment_id)
modification.node_instances['before_rollback'] = [
instance.to_dict() for instance in node_instances]
for instance in node_instances:
self.sm.delete_node_instance(instance.id)
for instance in modification.node_instances['before_modification']:
self.sm.put_node_instance(
models.DeploymentNodeInstance(**instance))
nodes_num_instances = {node.id: node for node in self.sm.get_nodes(
deployment_id=modification.deployment_id,
include=['id', 'number_of_instances'])}
for node_id, modified_node in modification.modified_nodes.items():
self.sm.update_node(
modification.deployment_id, node_id,
planned_number_of_instances=nodes_num_instances[
node_id].number_of_instances)
now = str(datetime.now())
self.sm.update_deployment_modification(
models.DeploymentModification(
id=modification_id,
status=models.DeploymentModification.ROLLEDBACK,
ended_at=now,
created_at=None,
deployment_id=None,
modified_nodes=None,
node_instances=modification.node_instances,
context=None))
return models.DeploymentModification(
id=modification_id,
status=models.DeploymentModification.ROLLEDBACK,
ended_at=None,
created_at=None,
deployment_id=None,
modified_nodes=None,
node_instances=None,
context=None)
def _get_node_instance_ids(self, deployment_id):
return self.sm.get_node_instances(deployment_id, include=['id'])
def _create_deployment_node_instances(self,
deployment_id,
dsl_node_instances):
for node_instance in dsl_node_instances:
instance_id = node_instance['id']
node_id = node_instance['name']
relationships = node_instance.get('relationships', [])
host_id = node_instance.get('host_id')
instance = models.DeploymentNodeInstance(
id=instance_id,
node_id=node_id,
host_id=host_id,
relationships=relationships,
deployment_id=deployment_id,
state='uninitialized',
runtime_properties={},
version=None)
self.sm.put_node_instance(instance)
def evaluate_deployment_outputs(self, deployment_id):
deployment = self.get_deployment(
deployment_id, include=['outputs'])
def get_node_instances(node_id=None):
return self.sm.get_node_instances(deployment_id, node_id)
def get_node_instance(node_instance_id):
return self.sm.get_node_instance(node_instance_id)
def get_node(node_id):
return self.sm.get_node(deployment_id, node_id)
try:
return functions.evaluate_outputs(
outputs_def=deployment.outputs,
get_node_instances_method=get_node_instances,
get_node_instance_method=get_node_instance,
get_node_method=get_node)
except parser_exceptions.FunctionEvaluationError, e:
raise manager_exceptions.DeploymentOutputsEvaluationError(str(e))
def evaluate_functions(self, deployment_id, context, payload):
self.get_deployment(deployment_id, include=['id'])
def get_node_instances(node_id=None):
return self.sm.get_node_instances(deployment_id, node_id)
def get_node_instance(node_instance_id):
return self.sm.get_node_instance(node_instance_id)
def get_node(node_id):
return self.sm.get_node(deployment_id, node_id)
try:
return functions.evaluate_functions(
payload=payload,
context=context,
get_node_instances_method=get_node_instances,
get_node_instance_method=get_node_instance,
get_node_method=get_node)
except parser_exceptions.FunctionEvaluationError, e:
raise manager_exceptions.FunctionsEvaluationError(str(e))
def _create_deployment_nodes(self, blueprint_id, deployment_id, plan):
for raw_node in plan['nodes']:
num_instances = raw_node['instances']['deploy']
self.sm.put_node(models.DeploymentNode(
id=raw_node['name'],
deployment_id=deployment_id,
blueprint_id=blueprint_id,
type=raw_node['type'],
type_hierarchy=raw_node['type_hierarchy'],
number_of_instances=num_instances,
planned_number_of_instances=num_instances,
deploy_number_of_instances=num_instances,
host_id=raw_node['host_id'] if 'host_id' in raw_node else None,
properties=raw_node['properties'],
operations=raw_node['operations'],
plugins=raw_node['plugins'],
plugins_to_install=raw_node.get('plugins_to_install'),
relationships=self._prepare_node_relationships(raw_node)
))
@staticmethod
def _merge_and_validate_execution_parameters(
workflow, workflow_name, execution_parameters=None,
allow_custom_parameters=False):
"""
Merge parameters: parameters passed directly to the execution request
override workflow parameters from the original plan. Any
parameter without a default value in the blueprint must
appear in the execution request parameters.
Custom parameters will be passed to the workflow as well if allowed;
otherwise, an exception will be raised if such parameters are passed.
"""
merged_execution_parameters = dict()
workflow_parameters = workflow.get('parameters', dict())
execution_parameters = execution_parameters or dict()
missing_mandatory_parameters = set()
for param_name, param in workflow_parameters.iteritems():
if 'default' not in param:
# parameter without a default value - ensure one was
# provided via execution parameters
if param_name not in execution_parameters:
missing_mandatory_parameters.add(param_name)
continue
merged_execution_parameters[param_name] = \
execution_parameters[param_name]
else:
merged_execution_parameters[param_name] = \
execution_parameters[param_name] if \
param_name in execution_parameters else param['default']
if missing_mandatory_parameters:
raise \
manager_exceptions.IllegalExecutionParametersError(
'Workflow "{0}" must be provided with the following '
'parameters to execute: {1}'.format(
workflow_name, ','.join(missing_mandatory_parameters)))
custom_parameters = {k: v for k, v in execution_parameters.iteritems()
if k not in workflow_parameters}
if not allow_custom_parameters and custom_parameters:
raise \
manager_exceptions.IllegalExecutionParametersError(
'Workflow "{0}" does not have the following parameters '
'declared: {1}. Remove these parameters or use '
'the flag for allowing custom parameters'
.format(workflow_name, ','.join(custom_parameters.keys())))
merged_execution_parameters.update(custom_parameters)
return merged_execution_parameters
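# Worked example (illustrative): given workflow parameters
#   {'a': {'default': 1}, 'b': {}}
# and execution parameters {'b': 2, 'c': 3}, the merge yields
#   {'a': 1, 'b': 2}
# and the custom parameter 'c' is passed through only when
# allow_custom_parameters is True; otherwise
# IllegalExecutionParametersError is raised for it.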
@staticmethod
def _prepare_node_relationships(raw_node):
if 'relationships' not in raw_node:
return []
prepared_relationships = []
for raw_relationship in raw_node['relationships']:
relationship = {
'target_id': raw_relationship['target_id'],
'type': raw_relationship['type'],
'type_hierarchy': raw_relationship['type_hierarchy'],
'properties': raw_relationship['properties'],
'source_operations': raw_relationship['source_operations'],
'target_operations': raw_relationship['target_operations'],
}
prepared_relationships.append(relationship)
return prepared_relationships
def _verify_deployment_environment_created_successfully(self,
deployment_id):
env_creation = next(
(execution for execution in
self.sm.executions_list(deployment_id=deployment_id)
if execution.workflow_id == 'create_deployment_environment'),
None)
if not env_creation:
raise RuntimeError('Failed to find "create_deployment_environment"'
' execution for deployment {0}'.format(
deployment_id))
status = env_creation.status
if status == models.Execution.TERMINATED:
return
elif status == models.Execution.PENDING:
raise manager_exceptions \
.DeploymentEnvironmentCreationPendingError(
'Deployment environment creation is still pending, '
'try again in a minute')
elif status == models.Execution.STARTED:
raise manager_exceptions\
.DeploymentEnvironmentCreationInProgressError(
'Deployment environment creation is still in progress, '
'try again in a minute')
elif status == models.Execution.FAILED:
raise RuntimeError(
"Can't launch executions since environment creation for "
"deployment {0} has failed: {1}".format(
deployment_id, env_creation.error))
elif status in (
models.Execution.CANCELLED, models.Execution.CANCELLING,
models.Execution.FORCE_CANCELLING):
raise RuntimeError(
"Can't launch executions since the environment creation for "
"deployment {0} has been cancelled [status={1}]".format(
deployment_id, status))
else:
raise RuntimeError(
'Unexpected deployment status for deployment {0} '
'[status={1}]'.format(deployment_id, status))
def _create_deployment_environment(self, deployment, deployment_plan):
wf_id = 'create_deployment_environment'
deployment_env_creation_task_name = \
'cloudify_system_workflows.deployment_environment.create'
kwargs = {
DEPLOYMENT_PLUGINS_TO_INSTALL: deployment_plan[
DEPLOYMENT_PLUGINS_TO_INSTALL],
'workflow_plugins_to_install': deployment_plan[
'workflow_plugins_to_install'],
'policy_configuration': {
'policy_types': deployment_plan['policy_types'],
'policy_triggers': deployment_plan['policy_triggers'],
'groups': deployment_plan['groups'],
},
}
self._execute_system_workflow(
deployment, wf_id, deployment_env_creation_task_name, kwargs)
def _delete_deployment_environment(self, deployment_id):
deployment = self.sm.get_deployment(deployment_id)
wf_id = 'delete_deployment_environment'
deployment_env_deletion_task_name = \
'cloudify_system_workflows.deployment_environment.delete'
self._execute_system_workflow(
deployment, wf_id, deployment_env_deletion_task_name, timeout=300)
def _check_for_active_executions(self, deployment_id, force,
transient_workers_config):
is_transient_workers_enabled = transient_workers_config['enabled']
def _get_running_executions(deployment_id=None, include_system=True):
executions = self.executions_list(
deployment_id=deployment_id,
is_include_system_workflows=include_system)
running = [
e.id for e in executions if
self.sm.get_execution(e.id).status
not in models.Execution.END_STATES]
return running
# validate no execution is currently in progress
if not force:
running = _get_running_executions(deployment_id)
if len(running) > 0:
raise manager_exceptions.ExistingRunningExecutionError(
'The following executions are currently running for this '
'deployment: {0}. To execute this workflow anyway, pass '
'"force=true" as a query parameter to this request'.format(
running))
elif is_transient_workers_enabled:
raise manager_exceptions.ExistingRunningExecutionError(
'Forcing parallel executions in a single deployment is '
'disabled in transient deployment workers mode')
if is_transient_workers_enabled:
global_parallel_executions_limit = \
transient_workers_config['global_parallel_executions_limit']
if global_parallel_executions_limit != \
LIMITLESS_GLOBAL_PARALLEL_EXECUTIONS_VALUE:
running = _get_running_executions()
if len(running) >= global_parallel_executions_limit:
raise manager_exceptions. \
GlobalParallelRunningExecutionsLimitReachedError(
'New workflows may not be executed at this time, '
'because the global parallel running executions limit '
'has been reached ({0} running executions; '
'global limit {1}). Please try again soon'
.format(len(running),
global_parallel_executions_limit))
def _get_transient_deployment_workers_mode_config(self):
provider_context = self.sm.get_provider_context().context
transient_workers_config = provider_context['cloudify'].get(
'transient_deployment_workers_mode', {})
# setting defaults if missing
transient_workers_config['enabled'] = \
transient_workers_config.get('enabled', False)
transient_workers_config['global_parallel_executions_limit'] = \
transient_workers_config.get(
'global_parallel_executions_limit',
LIMITLESS_GLOBAL_PARALLEL_EXECUTIONS_VALUE)
return transient_workers_config
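# Illustrative shape of the returned config when the provider context
# contains no overrides (values follow the defaults applied above):
#   {'enabled': False,
#    'global_parallel_executions_limit':
#        LIMITLESS_GLOBAL_PARALLEL_EXECUTIONS_VALUE}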
@staticmethod
def _get_only_user_execution_parameters(execution_parameters):
return {k: v for k, v in execution_parameters.iteritems()
if not k.startswith('__')}
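# Illustrative example of the filtering above (hypothetical values):
#   {'node_id': 'vm', '__system_param': 1}  ->  {'node_id': 'vm'}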
def teardown_blueprints_manager(exception):
# print "tearing down blueprints manager!"
pass
# Accessor for retrieving this manager within a Flask app context
def get_blueprints_manager():
"""
Get the current blueprints manager
or create one if none exists for the current app context
"""
if 'blueprints_manager' not in g:
g.blueprints_manager = BlueprintsManager()
maybe_register_teardown(current_app, teardown_blueprints_manager)
return g.blueprints_manager
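# Illustrative usage sketch (not part of the original module): inside a
# Flask request handler the shared manager is fetched through the accessor
# above. The route and the get_blueprint() call are hypothetical.
#
# @app.route('/blueprints/<blueprint_id>')
# def get_blueprint(blueprint_id):
#     manager = get_blueprints_manager()
#     return manager.get_blueprint(blueprint_id)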
| apache-2.0 |
goddardl/cortex | test/IECore/TransformOpTest.py | 2 | 7030 | ##########################################################################
#
# Copyright (c) 2008-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
from IECore import *
class TestTransformOp( unittest.TestCase ) :
def testParameterDefaults( self ) :
o = TransformOp()
self.assertEqual( o["primVarsToModify"].getValue(), StringVectorData( [ "P", "N" ] ) )
def testTransformation( self ) :
m = MeshPrimitive.createBox( Box3f( V3f( -1 ), V3f( 1 ) ) )
MeshNormalsOp()( input = m, copyInput = False )
m["vel"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, V3fVectorData( [ V3f( 0.5 ) ] * 8, GeometricData.Interpretation.Vector ) )
m["notVel"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, V3fVectorData( [ V3f( 0.5 ) ] * 8 ) )
mt = TransformOp()( input=m, primVarsToModify = StringVectorData( m.keys() ), matrix = M44fData( M44f.createTranslated( V3f( 1 ) ) ) )
self.assertEqual( mt.bound(), Box3f( V3f( 0 ), V3f( 2 ) ) )
self.assertEqual( mt["P"].data, V3fVectorData( [ x + V3f( 1 ) for x in m["P"].data ], GeometricData.Interpretation.Point ) )
self.assertEqual( mt["N"].data, m["N"].data )
self.assertEqual( mt["vel"].data, m["vel"].data )
self.assertEqual( mt["notVel"].data, m["notVel"].data )
ms = TransformOp()( input=m, primVarsToModify = StringVectorData( m.keys() ), matrix = M44fData( M44f.createScaled( V3f( 1, 2, 3 ) ) ) )
self.assertEqual( ms.bound(), Box3f( V3f( -1, -2, -3 ), V3f( 1, 2, 3 ) ) )
self.assertEqual( ms["P"].data, V3fVectorData( [ x * V3f( 1, 2, 3 ) for x in m["P"].data ], GeometricData.Interpretation.Point ) )
self.assertNotEqual( ms["N"].data, m["N"].data )
self.assertNotEqual( ms["N"].data, V3fVectorData( [ x * V3f( 1, 2, 3 ) for x in m["N"].data ], GeometricData.Interpretation.Normal ) )
self.assertEqual( ms["vel"].data, V3fVectorData( [ x * V3f( 1, 2, 3 ) for x in m["vel"].data ], GeometricData.Interpretation.Vector ) )
self.assertEqual( ms["notVel"].data, m["notVel"].data )
self.assertEqual( ms["P"].data.getInterpretation(), GeometricData.Interpretation.Point )
self.assertEqual( ms["N"].data.getInterpretation(), GeometricData.Interpretation.Normal )
self.assertEqual( ms["vel"].data.getInterpretation(), GeometricData.Interpretation.Vector )
self.assertEqual( ms["notVel"].data.getInterpretation(), GeometricData.Interpretation.Numeric )
def testPrimVarParameter( self ) :
m = MeshPrimitive.createBox( Box3f( V3f( -1 ), V3f( 1 ) ) )
MeshNormalsOp()( input = m, copyInput = False )
m["vel"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, V3fVectorData( [ V3f( 0.5 ) ] * 8, GeometricData.Interpretation.Vector ) )
m["notVel"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, V3fVectorData( [ V3f( 0.5 ) ] * 8 ) )
ms = TransformOp()( input=m, primVarsToModify = StringVectorData( [ "P", "vel" ] ), matrix = M44fData( M44f.createScaled( V3f( 1, 2, 3 ) ) ) )
self.assertEqual( ms.bound(), Box3f( V3f( -1, -2, -3 ), V3f( 1, 2, 3 ) ) )
self.assertEqual( ms["P"].data, V3fVectorData( [ x * V3f( 1, 2, 3 ) for x in m["P"].data ], GeometricData.Interpretation.Point ) )
self.assertEqual( ms["N"].data, m["N"].data )
self.assertEqual( ms["vel"].data, V3fVectorData( [ x * V3f( 1, 2, 3 ) for x in m["vel"].data ], GeometricData.Interpretation.Vector ) )
self.assertEqual( ms["notVel"].data, m["notVel"].data )
ms = TransformOp()( input=m, primVarsToModify = StringVectorData( [ "P" ] ), matrix = M44fData( M44f.createScaled( V3f( 1, 2, 3 ) ) ) )
self.assertEqual( ms.bound(), Box3f( V3f( -1, -2, -3 ), V3f( 1, 2, 3 ) ) )
self.assertEqual( ms["P"].data, V3fVectorData( [ x * V3f( 1, 2, 3 ) for x in m["P"].data ], GeometricData.Interpretation.Point ) )
self.assertEqual( ms["N"].data, m["N"].data )
self.assertEqual( ms["N"].data, m["N"].data )
self.assertEqual( ms["notVel"].data, m["notVel"].data )
def testSamePrimVars( self ) :
m = MeshPrimitive.createBox( Box3f( V3f( -1 ), V3f( 1 ) ) )
MeshNormalsOp()( input = m, copyInput = False )
m["vel"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, V3fVectorData( [ V3f( 0.5 ) ] * 8, GeometricData.Interpretation.Vector ) )
m["sameVel"] = m["vel"]
ms = TransformOp()( input=m, primVarsToModify = StringVectorData( [ "vel", "sameVel" ] ), matrix = M44fData( M44f.createScaled( V3f( 1, 2, 3 ) ) ) )
self.assertEqual( ms["vel"].data, V3fVectorData( [ x * V3f( 1, 2, 3 ) for x in m["vel"].data ], GeometricData.Interpretation.Vector ) )
self.assertEqual( ms["vel"].data, ms["sameVel"].data )
def testIdenticalPrimVarsCanBeExcluded( self ) :
m = MeshPrimitive.createBox( Box3f( V3f( -1 ), V3f( 1 ) ) )
MeshNormalsOp()( input = m, copyInput = False )
m["vel"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, V3fVectorData( [ V3f( 0.5 ) ] * 8, GeometricData.Interpretation.Vector ) )
m["otherVel"] = m["vel"]
ms = TransformOp()( input=m, primVarsToModify = StringVectorData( [ "vel" ] ), matrix = M44fData( M44f.createScaled( V3f( 1, 2, 3 ) ) ) )
self.assertEqual( ms["vel"].data, V3fVectorData( [ x * V3f( 1, 2, 3 ) for x in m["vel"].data ], GeometricData.Interpretation.Vector ) )
self.assertNotEqual( ms["vel"].data, ms["otherVel"].data )
self.assertEqual( ms["otherVel"].data, m["otherVel"].data )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
bdeak/taskmgr | fabfile/execute/install_package.py | 1 | 2064 | from fabric.api import *
import re
import os.path
import logging
import utils.log
l = logging.getLogger()
l = utils.log.CustomLogAdapter(l, None)
@task(default=True)
def check(input_params, cluster):
""" Install a given version of a given package
Can support multiple backends
input_params parameter is a string, with the following fields:
package:version
The backend to be used for package management is autodetected.
For adapting to various systems this needs to be extended.
"""
# split up the input_params, and make sense of it
m = re.search("^([^:]+)(?::(.+))?$", input_params)
if not m:
raise AttributeError("The given input_params '%s' doesn't match the requirements!" % input_params)
package = m.group(1)
version = m.group(2) if m.group(2) else None
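# Illustrative parses of the regex above (hypothetical package names):
#   "nginx"        -> package="nginx", version=None
#   "nginx:1.9.3"  -> package="nginx", version="1.9.3"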
# auto detect the backend
# warn_only lets a non-zero exit code come back as a result object
# instead of aborting the task, so return_code can be inspected below
try:
with settings(warn_only=True):
result = run("test -e /usr/bin/apt-get")
except Exception:
raise RuntimeError("%s: Failed to execute remote command for detecting backend" % env.command)
if result.return_code == 0:
backend = "apt_get"
else:
# check for other backends - not yet implemented
raise SystemError("%s: only backend 'apt_get' is currently supported." % env.command)
backends = { 'apt_get': install_package_apt_get }
if backend not in backends:
raise ValueError("function for detected backend '%s' is not found!" % backend)
return backends[backend](package, version)
def install_package_apt_get(package, version):
""" Install the package, internal function, not exposed via @task """
if version is None:
# just install the package
command = "apt-get -qq update && apt-get -qq install -y %s" % package
else:
command = "apt-get -qq update && apt-get -qq install -y %s=%s" % (package, version)
# warn_only so a failed install is reported as False instead of aborting
try:
with settings(warn_only=True):
result = sudo(command)
except Exception:
return False
return result.succeeded
| gpl-2.0 |
okolisny/integration_tests | scripts/post_jenkins_result.py | 1 | 2181 | #!/usr/bin/env python2
import json
import os
import os.path
from datetime import datetime
from artifactor.plugins.post_result import test_report
from cfme.utils import read_env
from cfme.utils.path import project_path
from cfme.utils.trackerbot import post_jenkins_result
job_name = os.environ['JOB_NAME']
number = int(os.environ['BUILD_NUMBER'])
date = str(datetime.now())
# reduce returns to bools for easy logic
runner_src = read_env(project_path.join('.jenkins_runner_result'))
runner_return = runner_src.get('RUNNER_RETURN', '1') == '0'
test_return = runner_src.get('TEST_RETURN', '1') == '0'
# 'stream' environ is set by jenkins for all stream test jobs
# but not in the template tester
if job_name not in ('template-tester', 'template-tester-openstack',
'template-tester-rhevm', 'template-tester-virtualcenter'):
# try to pull out the appliance template name
template_src = read_env(project_path.join('.appliance_template'))
template = template_src.get('appliance_template', 'Unknown')
stream = os.environ['stream']
else:
tester_src = read_env(project_path.join('.template_tester'))
stream = tester_src['stream']
template = tester_src['appliance_template']
if test_report.check():
with test_report.open() as f:
artifact_report = json.load(f)
else:
raise RuntimeError('Unable to post to jenkins without test report: '
'{} does not exist!'.format(test_report.strpath))
if runner_return and test_return:
build_status = 'success'
elif runner_return:
build_status = 'unstable'
else:
build_status = 'failed'
result_attrs = ('job_name', 'number', 'stream', 'date', 'template',
'build_status', 'artifact_report')
# pack the result attr values into the jenkins post (eval reads each
# name from the module namespace populated above)
post_jenkins_result(*[eval(attr) for attr in result_attrs])
# vain output padding calculation
# get len of longest string, pad with an extra space to make the output pretty
max_len = len(max(result_attrs, key=len)) + 1
# now print all the attrs except the bulky artifact report, so we can see
# what we posted (and *that* we posted) in the jenkins log
for attr in result_attrs[:-1]:
print('{:>{width}}: {}'.format(attr, eval(attr), width=max_len))
| gpl-2.0 |
najeeb97khan/Tensorflow-CS20SI | Assignment_2/style_transfer/utils.py | 3 | 2227 | """ Utils needed for the implementation of the paper "A Neural Algorithm of Artistic Style"
by Gatys et al. in TensorFlow.
Author: Chip Huyen (huyenn@stanford.edu)
Prepared for the class CS 20SI: "TensorFlow for Deep Learning Research"
For more details, please read the assignment handout:
http://web.stanford.edu/class/cs20si/assignments/a2.pdf
"""
from __future__ import print_function
import os
from PIL import Image, ImageOps
import numpy as np
import scipy.misc
from six.moves import urllib
def download(download_link, file_name, expected_bytes):
""" Download the pretrained VGG-19 model if it's not already downloaded """
if os.path.exists(file_name):
print("Dataset ready")
return
print("Downloading the VGG pre-trained model. This might take a while ...")
file_name, _ = urllib.request.urlretrieve(download_link, file_name)
file_stat = os.stat(file_name)
if file_stat.st_size == expected_bytes:
print('Successfully downloaded the file', file_name)
else:
raise Exception('File ' + file_name +
' might be corrupted. You should try downloading it with a browser.')
def get_resized_image(img_path, height, width, save=True):
image = Image.open(img_path)
# PIL uses (width, height) ordering, so the arguments are swapped
# relative to the (height, width) convention used elsewhere in this file
image = ImageOps.fit(image, (width, height), Image.ANTIALIAS)
if save:
image_dirs = img_path.split('/')
image_dirs[-1] = 'resized_' + image_dirs[-1]
out_path = '/'.join(image_dirs)
if not os.path.exists(out_path):
image.save(out_path)
image = np.asarray(image, np.float32)
return np.expand_dims(image, 0)
def generate_noise_image(content_image, height, width, noise_ratio=0.6):
noise_image = np.random.uniform(-20, 20,
(1, height, width, 3)).astype(np.float32)
return noise_image * noise_ratio + content_image * (1 - noise_ratio)
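# Note on the blend above: with the default noise_ratio of 0.6 the initial
# image is 60% uniform noise and 40% content image, giving the optimizer a
# randomized starting point that still retains the content layout.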
def save_image(path, image):
# Output should add back the mean pixels we subtracted at the beginning
image = image[0]  # strip the batch dimension
image = np.clip(image, 0, 255).astype('uint8')
scipy.misc.imsave(path, image) | mit |
cetic/ansible | lib/ansible/modules/network/cloudengine/ce_vrf_interface.py | 46 | 15550 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'}
DOCUMENTATION = '''
---
module: ce_vrf_interface
version_added: "2.4"
short_description: Manages interface specific VPN configuration on HUAWEI CloudEngine switches.
description:
- Manages interface specific VPN configuration of HUAWEI CloudEngine switches.
author: Zhijin Zhou (@CloudEngine-Ansible)
notes:
- Ensure that a VPN instance has been created and the IPv4 address family has been enabled for the VPN instance.
options:
vrf:
description:
- VPN instance, the length of vrf name is 1 ~ 31, i.e. "test", but can not be C(_public_).
required: true
vpn_interface:
description:
- An interface that can binding VPN instance, i.e. 40GE1/0/22, Vlanif10.
Must be fully qualified interface name.
Interface types, such as 10GE, 40GE, 100GE, LoopBack, MEth, Tunnel, Vlanif....
required: true
state:
description:
- Manage the state of the resource.
required: false
choices: ['present','absent']
default: present
'''
EXAMPLES = '''
- name: VRF interface test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Configure a VPN instance for the interface"
ce_vrf_interface:
vpn_interface: 40GE1/0/2
vrf: test
state: present
provider: "{{ cli }}"
- name: "Disable the association between a VPN instance and an interface"
ce_vrf_interface:
vpn_interface: 40GE1/0/2
vrf: test
state: absent
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {
"state": "present",
"vpn_interface": "40GE2/0/17",
"vrf": "jss"
}
existing:
description: k/v pairs of existing attributes on the interface
returned: verbose mode
type: dict
sample: {
"vpn_interface": "40GE2/0/17",
"vrf": null
}
end_state:
description: k/v pairs of end attributes on the interface
returned: verbose mode
type: dict
sample: {
"vpn_interface": "40GE2/0/17",
"vrf": "jss"
}
updates:
description: command list sent to the device
returned: always
type: list
sample: [
"ip binding vpn-instance jss",
]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import ce_argument_spec, get_nc_config, set_nc_config
CE_NC_GET_VRF = """
<filter type="subtree">
<l3vpn xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<l3vpncomm>
<l3vpnInstances>
<l3vpnInstance>
<vrfName>%s</vrfName>
</l3vpnInstance>
</l3vpnInstances>
</l3vpncomm>
</l3vpn>
</filter>
"""
CE_NC_GET_VRF_INTERFACE = """
<filter type="subtree">
<l3vpn xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<l3vpncomm>
<l3vpnInstances>
<l3vpnInstance>
<vrfName></vrfName>
<l3vpnIfs>
<l3vpnIf>
<ifName></ifName>
</l3vpnIf>
</l3vpnIfs>
</l3vpnInstance>
</l3vpnInstances>
</l3vpncomm>
</l3vpn>
</filter>
"""
CE_NC_MERGE_VRF_INTERFACE = """
<config>
<l3vpn xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<l3vpncomm>
<l3vpnInstances>
<l3vpnInstance>
<vrfName>%s</vrfName>
<l3vpnIfs>
<l3vpnIf operation="merge">
<ifName>%s</ifName>
</l3vpnIf>
</l3vpnIfs>
</l3vpnInstance>
</l3vpnInstances>
</l3vpncomm>
</l3vpn>
</config>
"""
CE_NC_GET_INTF = """
<filter type="subtree">
<ifm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<interfaces>
<interface>
<ifName>%s</ifName>
<isL2SwitchPort></isL2SwitchPort>
</interface>
</interfaces>
</ifm>
</filter>
"""
CE_NC_DEL_INTF_VPN = """
<config>
<l3vpn xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<l3vpncomm>
<l3vpnInstances>
<l3vpnInstance>
<vrfName>%s</vrfName>
<l3vpnIfs>
<l3vpnIf operation="delete">
<ifName>%s</ifName>
</l3vpnIf>
</l3vpnIfs>
</l3vpnInstance>
</l3vpnInstances>
</l3vpncomm>
</l3vpn>
</config>
"""
def get_interface_type(interface):
"""Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF..."""
if interface is None:
return None
iftype = None
if interface.upper().startswith('GE'):
iftype = 'ge'
elif interface.upper().startswith('10GE'):
iftype = '10ge'
elif interface.upper().startswith('25GE'):
iftype = '25ge'
elif interface.upper().startswith('4X10GE'):
iftype = '4x10ge'
elif interface.upper().startswith('40GE'):
iftype = '40ge'
elif interface.upper().startswith('100GE'):
iftype = '100ge'
elif interface.upper().startswith('VLANIF'):
iftype = 'vlanif'
elif interface.upper().startswith('LOOPBACK'):
iftype = 'loopback'
elif interface.upper().startswith('METH'):
iftype = 'meth'
elif interface.upper().startswith('ETH-TRUNK'):
iftype = 'eth-trunk'
elif interface.upper().startswith('VBDIF'):
iftype = 'vbdif'
elif interface.upper().startswith('NVE'):
iftype = 'nve'
elif interface.upper().startswith('TUNNEL'):
iftype = 'tunnel'
elif interface.upper().startswith('ETHERNET'):
iftype = 'ethernet'
elif interface.upper().startswith('FCOE-PORT'):
iftype = 'fcoe-port'
elif interface.upper().startswith('FABRIC-PORT'):
iftype = 'fabric-port'
elif interface.upper().startswith('STACK-PORT'):
iftype = 'stack-port'
elif interface.upper().startswith('NULL'):
iftype = 'null'
else:
return None
return iftype.lower()
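# Illustrative examples (hypothetical interface names):
#   get_interface_type('40GE1/0/22') -> '40ge'
#   get_interface_type('Vlanif10')   -> 'vlanif'
#   get_interface_type('Foo0/0/1')   -> None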
class VrfInterface(object):
"""Manange vpn instance"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
# vpn instance info
self.vrf = self.module.params['vrf']
self.vpn_interface = self.module.params['vpn_interface']
self.vpn_interface = self.vpn_interface.upper().replace(' ', '')
self.state = self.module.params['state']
self.intf_info = dict()
self.intf_info['isL2SwitchPort'] = None
self.intf_info['vrfName'] = None
self.conf_exist = False
# state
self.changed = False
self.updates_cmd = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
def init_module(self):
"""init_module"""
required_one_of = [("vrf", "vpn_interface")]
self.module = AnsibleModule(
argument_spec=self.spec, required_one_of=required_one_of, supports_check_mode=True)
def check_response(self, xml_str, xml_name):
"""Check if response message is already succeed."""
if "<ok/>" not in xml_str:
self.module.fail_json(msg='Error: %s failed.' % xml_name)
def get_update_cmd(self):
""" get updated command"""
if self.conf_exist:
return
if self.state == 'absent':
self.updates_cmd.append(
"undo ip binding vpn-instance %s" % self.vrf)
return
if self.vrf != self.intf_info['vrfName']:
self.updates_cmd.append("ip binding vpn-instance %s" % self.vrf)
return
def check_params(self):
"""Check all input params"""
if not self.is_vrf_exist():
self.module.fail_json(
msg='Error: The VPN instance does not exist.')
if self.state == 'absent':
if self.vrf != self.intf_info['vrfName']:
self.module.fail_json(
msg='Error: The VPN instance is not bound to the interface.')
if self.intf_info['isL2SwitchPort'] == 'true':
self.module.fail_json(
msg='Error: an L2 switch port cannot be bound to a VPN instance.')
# interface type check
if self.vpn_interface:
intf_type = get_interface_type(self.vpn_interface)
if not intf_type:
self.module.fail_json(
msg='Error: the interface name %s'
' is invalid.' % self.vpn_interface)
# vrf check
if self.vrf == '_public_':
self.module.fail_json(
msg='Error: The vrf name _public_ is reserved.')
if len(self.vrf) < 1 or len(self.vrf) > 31:
self.module.fail_json(
msg='Error: The vrf name length must be between 1 and 31.')
def get_interface_vpn_name(self, vpninfo, vpn_name):
""" get vpn instance name"""
l3vpn_if = vpninfo.findall("l3vpnIf")
for l3vpn_ifinfo in l3vpn_if:
for ele in l3vpn_ifinfo:
if ele.tag in ['ifName']:
if ele.text == self.vpn_interface:
self.intf_info['vrfName'] = vpn_name
def get_interface_vpn(self):
""" get the VPN instance associated with the interface"""
xml_str = CE_NC_GET_VRF_INTERFACE
con_obj = get_nc_config(self.module, xml_str)
if "<data/>" in con_obj:
return
xml_str = con_obj.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
# get global vrf interface info
root = ElementTree.fromstring(xml_str)
vpns = root.findall(
"data/l3vpn/l3vpncomm/l3vpnInstances/l3vpnInstance")
if vpns:
for vpnele in vpns:
vpn_name = None
for vpninfo in vpnele:
if vpninfo.tag == 'vrfName':
vpn_name = vpninfo.text
if vpninfo.tag == 'l3vpnIfs':
self.get_interface_vpn_name(vpninfo, vpn_name)
return
def is_vrf_exist(self):
""" judge whether the VPN instance is existed"""
conf_str = CE_NC_GET_VRF % self.vrf
con_obj = get_nc_config(self.module, conf_str)
if "<data/>" in con_obj:
return False
return True
def get_intf_conf_info(self):
""" get related configuration of the interface"""
conf_str = CE_NC_GET_INTF % self.vpn_interface
con_obj = get_nc_config(self.module, conf_str)
if "<data/>" in con_obj:
return
# get interface base info
xml_str = con_obj.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_str)
interface = root.find("data/ifm/interfaces/interface")
if interface:
for eles in interface:
if eles.tag in ["isL2SwitchPort"]:
self.intf_info[eles.tag] = eles.text
self.get_interface_vpn()
return
def get_existing(self):
"""get existing config"""
self.existing = dict(vrf=self.intf_info['vrfName'],
vpn_interface=self.vpn_interface)
def get_proposed(self):
"""get_proposed"""
self.proposed = dict(vrf=self.vrf,
vpn_interface=self.vpn_interface,
state=self.state)
def get_end_state(self):
"""get_end_state"""
self.intf_info['vrfName'] = None
self.get_intf_conf_info()
self.end_state = dict(vrf=self.intf_info['vrfName'],
vpn_interface=self.vpn_interface)
def show_result(self):
""" show result"""
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def judge_if_config_exist(self):
""" judge whether configuration has existed"""
if self.state == 'absent':
return False
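# 'proposed' always differs from 'existing' at least in the 'state' key
# (existing has no 'state' entry); if that is the only difference, the
# requested vrf/interface binding is already configured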
delta = set(self.proposed.items()).difference(
self.existing.items())
delta = dict(delta)
if len(delta) == 1 and delta['state']:
return True
return False
def config_interface_vrf(self):
""" configure VPN instance of the interface"""
if not self.conf_exist and self.state == 'present':
xml_str = CE_NC_MERGE_VRF_INTERFACE % (
self.vrf, self.vpn_interface)
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "VRF_INTERFACE_CONFIG")
self.changed = True
elif self.state == 'absent':
xml_str = CE_NC_DEL_INTF_VPN % (self.vrf, self.vpn_interface)
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "DEL_VRF_INTERFACE_CONFIG")
self.changed = True
def work(self):
"""excute task"""
self.get_intf_conf_info()
self.check_params()
self.get_existing()
self.get_proposed()
self.conf_exist = self.judge_if_config_exist()
self.config_interface_vrf()
self.get_update_cmd()
self.get_end_state()
self.show_result()
def main():
"""main"""
argument_spec = dict(
vrf=dict(required=True, type='str'),
vpn_interface=dict(required=True, type='str'),
state=dict(choices=['absent', 'present'],
default='present', required=False),
)
argument_spec.update(ce_argument_spec)
vrf_intf = VrfInterface(argument_spec)
vrf_intf.work()
if __name__ == '__main__':
main()
| gpl-3.0 |
blackzw/openwrt_sdk_dev1 | staging_dir/host/lib/python2.7/lib2to3/fixes/fix_set_literal.py | 326 | 1699 | """
Optional fixer to transform set() calls to set literals.
"""
# Author: Benjamin Peterson
from lib2to3 import fixer_base, pytree
from lib2to3.fixer_util import token, syms
class FixSetLiteral(fixer_base.BaseFix):
BM_compatible = True
explicit = True
PATTERN = """power< 'set' trailer< '('
(atom=atom< '[' (items=listmaker< any ((',' any)* [',']) >
|
single=any) ']' >
|
atom< '(' items=testlist_gexp< any ((',' any)* [',']) > ')' >
)
')' > >
"""
def transform(self, node, results):
single = results.get("single")
if single:
# Make a fake listmaker
fake = pytree.Node(syms.listmaker, [single.clone()])
single.replace(fake)
items = fake
else:
items = results["items"]
# Build the contents of the literal
literal = [pytree.Leaf(token.LBRACE, u"{")]
literal.extend(n.clone() for n in items.children)
literal.append(pytree.Leaf(token.RBRACE, u"}"))
# Set the prefix of the right brace to that of the ')' or ']'
literal[-1].prefix = items.next_sibling.prefix
maker = pytree.Node(syms.dictsetmaker, literal)
maker.prefix = node.prefix
# If the original was a single-element literal with a trailing comma, remove the extra comma.
if len(maker.children) == 4:
n = maker.children[2]
n.remove()
maker.children[-1].prefix = n.prefix
# Finally, replace the set call with our shiny new literal.
return maker
| gpl-2.0 |