repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
ANR-kamoulox/Telemeta | telemeta/south_migrations/0025_auto__chg_field_mediaitemmarker_author.py | 2 | 48564 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Apply the migration: allow NULL in media_markers.author_id.

    Relaxes MediaItemMarker.author from a required FK to an optional
    (nullable) FK to auth.User.
    """
    # Changing field 'MediaItemMarker.author'
    author_fk = self.gf('telemeta.models.core.ForeignKey')(null=True, to=orm['auth.User'])
    db.alter_column('media_markers', 'author_id', author_fk)
def backwards(self, orm):
    """Revert the migration: make media_markers.author_id required again.

    Restores MediaItemMarker.author to a non-nullable FK to auth.User,
    backfilling existing NULL rows with user id 1.
    """
    # Changing field 'MediaItemMarker.author'
    author_fk = self.gf('telemeta.models.core.ForeignKey')(default=1, to=orm['auth.User'])
    db.alter_column('media_markers', 'author_id', author_fk)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'telemeta.acquisitionmode': {
'Meta': {'ordering': "['value']", 'object_name': 'AcquisitionMode', 'db_table': "'acquisition_modes'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.adconversion': {
'Meta': {'ordering': "['value']", 'object_name': 'AdConversion', 'db_table': "'ad_conversions'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.contextkeyword': {
'Meta': {'ordering': "['value']", 'object_name': 'ContextKeyword', 'db_table': "'context_keywords'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.criteria': {
'Meta': {'object_name': 'Criteria', 'db_table': "'search_criteria'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('telemeta.models.core.CharField', [], {'max_length': '250'}),
'value': ('telemeta.models.core.CharField', [], {'max_length': '250'})
},
'telemeta.ethnicgroup': {
'Meta': {'ordering': "['value']", 'object_name': 'EthnicGroup', 'db_table': "'ethnic_groups'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.ethnicgroupalias': {
'Meta': {'ordering': "['ethnic_group__value']", 'unique_together': "(('ethnic_group', 'value'),)", 'object_name': 'EthnicGroupAlias', 'db_table': "'ethnic_group_aliases'"},
'ethnic_group': ('telemeta.models.core.ForeignKey', [], {'related_name': "'aliases'", 'to': "orm['telemeta.EthnicGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'max_length': '250'})
},
'telemeta.format': {
'Meta': {'object_name': 'Format', 'db_table': "'media_formats'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('telemeta.models.core.ForeignKey', [], {'related_name': "'format'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['telemeta.MediaItem']", 'blank': 'True', 'null': 'True'}),
'original_audio_quality': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'original_channels': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'format'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.NumberOfChannels']"}),
'original_code': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'original_comments': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'original_location': ('telemeta.models.core.ForeignKey', [], {'related_name': "'format'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['telemeta.Location']", 'blank': 'True', 'null': 'True'}),
'original_number': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'original_state': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'original_status': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'physical_format': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'format'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.PhysicalFormat']"}),
'recording_system': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'sticker_presence': ('telemeta.models.core.BooleanField', [], {'default': 'False'}),
'tape_reference': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'tape_speed': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'format'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.TapeSpeed']"}),
'tape_thickness': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'tape_vendor': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'format'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.TapeVendor']"}),
'tape_wheel_diameter': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'format'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.TapeWheelDiameter']"})
},
'telemeta.genericstyle': {
'Meta': {'ordering': "['value']", 'object_name': 'GenericStyle', 'db_table': "'generic_styles'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.instrument': {
'Meta': {'object_name': 'Instrument', 'db_table': "'instruments'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('telemeta.models.core.CharField', [], {'max_length': '250'})
},
'telemeta.instrumentalias': {
'Meta': {'object_name': 'InstrumentAlias', 'db_table': "'instrument_aliases'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('telemeta.models.core.CharField', [], {'max_length': '250'})
},
'telemeta.instrumentaliasrelation': {
'Meta': {'unique_together': "(('alias', 'instrument'),)", 'object_name': 'InstrumentAliasRelation', 'db_table': "'instrument_alias_relations'"},
'alias': ('telemeta.models.core.ForeignKey', [], {'related_name': "'other_name'", 'to': "orm['telemeta.InstrumentAlias']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instrument': ('telemeta.models.core.ForeignKey', [], {'related_name': "'relation'", 'to': "orm['telemeta.InstrumentAlias']"})
},
'telemeta.instrumentrelation': {
'Meta': {'unique_together': "(('instrument', 'parent_instrument'),)", 'object_name': 'InstrumentRelation', 'db_table': "'instrument_relations'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instrument': ('telemeta.models.core.ForeignKey', [], {'related_name': "'parent_relation'", 'to': "orm['telemeta.Instrument']"}),
'parent_instrument': ('telemeta.models.core.ForeignKey', [], {'related_name': "'child_relation'", 'to': "orm['telemeta.Instrument']"})
},
'telemeta.language': {
'Meta': {'ordering': "['name']", 'object_name': 'Language', 'db_table': "'languages'"},
'comment': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '3', 'blank': 'True'}),
'name': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'part1': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '1', 'blank': 'True'}),
'part2B': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '3', 'blank': 'True'}),
'part2T': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '3', 'blank': 'True'}),
'scope': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '1', 'blank': 'True'}),
'type': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '1', 'blank': 'True'})
},
'telemeta.legalright': {
'Meta': {'ordering': "['value']", 'object_name': 'LegalRight', 'db_table': "'legal_rights'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.location': {
'Meta': {'ordering': "['name']", 'object_name': 'Location', 'db_table': "'locations'"},
'complete_type': ('telemeta.models.core.ForeignKey', [], {'related_name': "'locations'", 'to': "orm['telemeta.LocationType']"}),
'current_location': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'past_names'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.Location']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_authoritative': ('telemeta.models.core.BooleanField', [], {'default': 'False'}),
'latitude': ('telemeta.models.core.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'longitude': ('telemeta.models.core.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'name': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '150'}),
'type': ('telemeta.models.core.IntegerField', [], {'default': '0', 'db_index': 'True', 'blank': 'True'})
},
'telemeta.locationalias': {
'Meta': {'ordering': "['alias']", 'unique_together': "(('location', 'alias'),)", 'object_name': 'LocationAlias', 'db_table': "'location_aliases'"},
'alias': ('telemeta.models.core.CharField', [], {'max_length': '150'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_authoritative': ('telemeta.models.core.BooleanField', [], {'default': 'False'}),
'location': ('telemeta.models.core.ForeignKey', [], {'related_name': "'aliases'", 'to': "orm['telemeta.Location']"})
},
'telemeta.locationrelation': {
'Meta': {'ordering': "['ancestor_location__name']", 'unique_together': "(('location', 'ancestor_location'),)", 'object_name': 'LocationRelation', 'db_table': "'location_relations'"},
'ancestor_location': ('telemeta.models.core.ForeignKey', [], {'related_name': "'descendant_relations'", 'to': "orm['telemeta.Location']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_authoritative': ('telemeta.models.core.BooleanField', [], {'default': 'False'}),
'is_direct': ('telemeta.models.core.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'location': ('telemeta.models.core.ForeignKey', [], {'related_name': "'ancestor_relations'", 'to': "orm['telemeta.Location']"})
},
'telemeta.locationtype': {
'Meta': {'ordering': "['name']", 'object_name': 'LocationType', 'db_table': "'location_types'"},
'code': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('telemeta.models.core.CharField', [], {'max_length': '150'})
},
'telemeta.mediacollection': {
'Meta': {'ordering': "['code']", 'object_name': 'MediaCollection', 'db_table': "'media_collections'"},
'a_informer_07_03': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'acquisition_mode': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'collections'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.AcquisitionMode']"}),
'ad_conversion': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'collections'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.AdConversion']"}),
'alt_ids': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'alt_title': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'approx_duration': ('telemeta.models.core.DurationField', [], {'default': "'0'", 'blank': 'True'}),
'booklet_author': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'booklet_description': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'cnrs_contributor': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'code': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'}),
'collector': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'collector_is_creator': ('telemeta.models.core.BooleanField', [], {'default': 'False'}),
'comment': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'conservation_site': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'creator': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'doctype_code': ('telemeta.models.core.IntegerField', [], {'default': '0', 'blank': 'True'}),
'external_references': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_published': ('telemeta.models.core.BooleanField', [], {'default': 'False'}),
'items_done': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'legal_rights': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'collections'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.LegalRight']"}),
'metadata_author': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'collections'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.MetadataAuthor']"}),
'metadata_writer': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'collections'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.MetadataWriter']"}),
'old_code': ('telemeta.models.core.CharField', [], {'default': 'None', 'max_length': '250', 'null': 'True', 'blank': 'True'}),
'physical_format': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'collections'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.PhysicalFormat']"}),
'physical_items_num': ('telemeta.models.core.IntegerField', [], {'default': '0', 'blank': 'True'}),
'public_access': ('telemeta.models.core.CharField', [], {'default': "'metadata'", 'max_length': '16', 'blank': 'True'}),
'publisher': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'collections'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.Publisher']"}),
'publisher_collection': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'collections'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.PublisherCollection']"}),
'publisher_serial': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'publishing_status': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'collections'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.PublishingStatus']"}),
'recorded_from_year': ('telemeta.models.core.IntegerField', [], {'default': '0', 'blank': 'True'}),
'recorded_to_year': ('telemeta.models.core.IntegerField', [], {'default': '0', 'blank': 'True'}),
'recording_context': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'collections'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.RecordingContext']"}),
'reference': ('telemeta.models.core.CharField', [], {'default': 'None', 'max_length': '250', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'state': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'title': ('telemeta.models.core.CharField', [], {'max_length': '250'}),
'travail': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'year_published': ('telemeta.models.core.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'telemeta.mediacollectionrelated': {
'Meta': {'object_name': 'MediaCollectionRelated', 'db_table': "'media_collection_related'"},
'collection': ('telemeta.models.core.ForeignKey', [], {'related_name': "'related'", 'to': "orm['telemeta.MediaCollection']"}),
'credits': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'date': ('telemeta.models.core.DateTimeField', [], {'default': 'None', 'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'file': ('telemeta.models.core.FileField', [], {'default': "''", 'max_length': '255', 'db_column': "'filename'", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mime_type': ('telemeta.models.core.CharField', [], {'default': 'None', 'max_length': '250', 'null': 'True', 'blank': 'True'}),
'title': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'url': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '500', 'blank': 'True'})
},
'telemeta.mediacorpus': {
'Meta': {'object_name': 'MediaCorpus', 'db_table': "'media_corpus'"},
'children': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'corpus'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['telemeta.MediaCollection']"}),
'code': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'}),
'description': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public_access': ('telemeta.models.core.CharField', [], {'default': "'metadata'", 'max_length': '16', 'blank': 'True'}),
'recorded_from_year': ('telemeta.models.core.IntegerField', [], {'default': '0', 'blank': 'True'}),
'recorded_to_year': ('telemeta.models.core.IntegerField', [], {'default': '0', 'blank': 'True'}),
'reference': ('telemeta.models.core.CharField', [], {'default': 'None', 'max_length': '250', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'title': ('telemeta.models.core.CharField', [], {'max_length': '250'})
},
'telemeta.mediacorpusrelated': {
'Meta': {'object_name': 'MediaCorpusRelated', 'db_table': "'media_corpus_related'"},
'credits': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'date': ('telemeta.models.core.DateTimeField', [], {'default': 'None', 'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'file': ('telemeta.models.core.FileField', [], {'default': "''", 'max_length': '255', 'db_column': "'filename'", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mime_type': ('telemeta.models.core.CharField', [], {'default': 'None', 'max_length': '250', 'null': 'True', 'blank': 'True'}),
'resource': ('telemeta.models.core.ForeignKey', [], {'related_name': "'related'", 'to': "orm['telemeta.MediaCorpus']"}),
'title': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'url': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '500', 'blank': 'True'})
},
'telemeta.mediafonds': {
'Meta': {'object_name': 'MediaFonds', 'db_table': "'media_fonds'"},
'children': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'fonds'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['telemeta.MediaCorpus']"}),
'code': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'}),
'description': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public_access': ('telemeta.models.core.CharField', [], {'default': "'metadata'", 'max_length': '16', 'blank': 'True'}),
'reference': ('telemeta.models.core.CharField', [], {'default': 'None', 'max_length': '250', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'title': ('telemeta.models.core.CharField', [], {'max_length': '250'})
},
'telemeta.mediafondsrelated': {
'Meta': {'object_name': 'MediaFondsRelated', 'db_table': "'media_fonds_related'"},
'credits': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'date': ('telemeta.models.core.DateTimeField', [], {'default': 'None', 'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'file': ('telemeta.models.core.FileField', [], {'default': "''", 'max_length': '255', 'db_column': "'filename'", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mime_type': ('telemeta.models.core.CharField', [], {'default': 'None', 'max_length': '250', 'null': 'True', 'blank': 'True'}),
'resource': ('telemeta.models.core.ForeignKey', [], {'related_name': "'related'", 'to': "orm['telemeta.MediaFonds']"}),
'title': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'url': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '500', 'blank': 'True'})
},
'telemeta.mediaitem': {
'Meta': {'object_name': 'MediaItem', 'db_table': "'media_items'"},
'alt_title': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'approx_duration': ('telemeta.models.core.DurationField', [], {'default': "'0'", 'blank': 'True'}),
'author': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'code': ('telemeta.models.core.CharField', [], {'default': "''", 'unique': 'True', 'max_length': '250', 'blank': 'True'}),
'collection': ('telemeta.models.core.ForeignKey', [], {'related_name': "'items'", 'to': "orm['telemeta.MediaCollection']"}),
'collector': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'collector_from_collection': ('telemeta.models.core.BooleanField', [], {'default': 'False'}),
'collector_selection': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'comment': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'context_comment': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'contributor': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'copied_from_item': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'copies'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.MediaItem']"}),
'creator_reference': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'cultural_area': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'depositor': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'digitalist': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'digitization_date': ('telemeta.models.core.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'ethnic_group': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'items'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.EthnicGroup']"}),
'external_references': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'file': ('telemeta.models.core.FileField', [], {'default': "''", 'max_length': '1024', 'db_column': "'filename'", 'blank': 'True'}),
'generic_style': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'items'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.GenericStyle']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'language_iso': ('telemeta.models.core.ForeignKey', [], {'related_name': "'items'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['telemeta.Language']", 'blank': 'True', 'null': 'True'}),
'location': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'to': "orm['telemeta.Location']", 'null': 'True', 'blank': 'True'}),
'location_comment': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'mimetype': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'moda_execut': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'old_code': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'organization': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'to': "orm['telemeta.Organization']", 'null': 'True', 'blank': 'True'}),
'public_access': ('telemeta.models.core.CharField', [], {'default': "'metadata'", 'max_length': '16', 'blank': 'True'}),
'publishing_date': ('telemeta.models.core.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'recorded_from_date': ('telemeta.models.core.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'recorded_to_date': ('telemeta.models.core.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'recordist': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'rights': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'to': "orm['telemeta.Rights']", 'null': 'True', 'blank': 'True'}),
'scientist': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'summary': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'title': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'topic': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'to': "orm['telemeta.Topic']", 'null': 'True', 'blank': 'True'}),
'track': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'vernacular_style': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'items'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.VernacularStyle']"})
},
'telemeta.mediaitemanalysis': {
'Meta': {'ordering': "['name']", 'object_name': 'MediaItemAnalysis', 'db_table': "'media_analysis'"},
'analyzer_id': ('telemeta.models.core.CharField', [], {'max_length': '250'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('telemeta.models.core.ForeignKey', [], {'related_name': "'analysis'", 'to': "orm['telemeta.MediaItem']"}),
'name': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'unit': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'})
},
'telemeta.mediaitemkeyword': {
'Meta': {'unique_together': "(('item', 'keyword'),)", 'object_name': 'MediaItemKeyword', 'db_table': "'media_item_keywords'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('telemeta.models.core.ForeignKey', [], {'related_name': "'keyword_relations'", 'to': "orm['telemeta.MediaItem']"}),
'keyword': ('telemeta.models.core.ForeignKey', [], {'related_name': "'item_relations'", 'to': "orm['telemeta.ContextKeyword']"})
},
'telemeta.mediaitemmarker': {
'Meta': {'object_name': 'MediaItemMarker', 'db_table': "'media_markers'"},
'author': ('telemeta.models.core.ForeignKey', [], {'default': 'None', 'related_name': "'markers'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'date': ('telemeta.models.core.DateTimeField', [], {'default': 'None', 'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('telemeta.models.core.ForeignKey', [], {'related_name': "'markers'", 'to': "orm['telemeta.MediaItem']"}),
'public_id': ('telemeta.models.core.CharField', [], {'max_length': '250'}),
'time': ('telemeta.models.core.FloatField', [], {'default': '0', 'blank': 'True'}),
'title': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'})
},
'telemeta.mediaitemperformance': {
'Meta': {'object_name': 'MediaItemPerformance', 'db_table': "'media_item_performances'"},
'alias': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'performances'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.InstrumentAlias']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instrument': ('telemeta.models.core.WeakForeignKey', [], {'default': 'None', 'related_name': "'performances'", 'null': 'True', 'blank': 'True', 'to': "orm['telemeta.Instrument']"}),
'instruments_num': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'media_item': ('telemeta.models.core.ForeignKey', [], {'related_name': "'performances'", 'to': "orm['telemeta.MediaItem']"}),
'musicians': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'})
},
'telemeta.mediaitemrelated': {
'Meta': {'object_name': 'MediaItemRelated', 'db_table': "'media_item_related'"},
'credits': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'date': ('telemeta.models.core.DateTimeField', [], {'default': 'None', 'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'file': ('telemeta.models.core.FileField', [], {'default': "''", 'max_length': '255', 'db_column': "'filename'", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('telemeta.models.core.ForeignKey', [], {'related_name': "'related'", 'to': "orm['telemeta.MediaItem']"}),
'mime_type': ('telemeta.models.core.CharField', [], {'default': 'None', 'max_length': '250', 'null': 'True', 'blank': 'True'}),
'title': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'url': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '500', 'blank': 'True'})
},
'telemeta.mediaitemtranscodingflag': {
'Meta': {'object_name': 'MediaItemTranscodingFlag', 'db_table': "'media_transcoding'"},
'date': ('telemeta.models.core.DateTimeField', [], {'default': 'None', 'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('telemeta.models.core.ForeignKey', [], {'related_name': "'transcoding'", 'to': "orm['telemeta.MediaItem']"}),
'mime_type': ('telemeta.models.core.CharField', [], {'max_length': '250'}),
'value': ('telemeta.models.core.BooleanField', [], {'default': 'False'})
},
'telemeta.mediapart': {
'Meta': {'object_name': 'MediaPart', 'db_table': "'media_parts'"},
'end': ('telemeta.models.core.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('telemeta.models.core.ForeignKey', [], {'related_name': "'parts'", 'to': "orm['telemeta.MediaItem']"}),
'start': ('telemeta.models.core.FloatField', [], {}),
'title': ('telemeta.models.core.CharField', [], {'max_length': '250'})
},
'telemeta.metadataauthor': {
'Meta': {'ordering': "['value']", 'object_name': 'MetadataAuthor', 'db_table': "'metadata_authors'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.metadatawriter': {
'Meta': {'ordering': "['value']", 'object_name': 'MetadataWriter', 'db_table': "'metadata_writers'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.numberofchannels': {
'Meta': {'ordering': "['value']", 'object_name': 'NumberOfChannels', 'db_table': "'original_channel_number'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.organization': {
'Meta': {'ordering': "['value']", 'object_name': 'Organization', 'db_table': "'organization'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.physicalformat': {
'Meta': {'ordering': "['value']", 'object_name': 'PhysicalFormat', 'db_table': "'physical_formats'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.playlist': {
'Meta': {'object_name': 'Playlist', 'db_table': "'playlists'"},
'author': ('telemeta.models.core.ForeignKey', [], {'related_name': "'playlists'", 'db_column': "'author'", 'to': "orm['auth.User']"}),
'description': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public_id': ('telemeta.models.core.CharField', [], {'max_length': '250'}),
'title': ('telemeta.models.core.CharField', [], {'max_length': '250'})
},
'telemeta.playlistresource': {
'Meta': {'object_name': 'PlaylistResource', 'db_table': "'playlist_resources'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'playlist': ('telemeta.models.core.ForeignKey', [], {'related_name': "'resources'", 'to': "orm['telemeta.Playlist']"}),
'public_id': ('telemeta.models.core.CharField', [], {'max_length': '250'}),
'resource_id': ('telemeta.models.core.CharField', [], {'max_length': '250'}),
'resource_type': ('telemeta.models.core.CharField', [], {'max_length': '250'})
},
'telemeta.publisher': {
'Meta': {'ordering': "['value']", 'object_name': 'Publisher', 'db_table': "'publishers'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.publishercollection': {
'Meta': {'ordering': "['value']", 'object_name': 'PublisherCollection', 'db_table': "'publisher_collections'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'publisher': ('telemeta.models.core.ForeignKey', [], {'related_name': "'publisher_collections'", 'to': "orm['telemeta.Publisher']"}),
'value': ('telemeta.models.core.CharField', [], {'max_length': '250'})
},
'telemeta.publishingstatus': {
'Meta': {'ordering': "['value']", 'object_name': 'PublishingStatus', 'db_table': "'publishing_status'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.recordingcontext': {
'Meta': {'ordering': "['value']", 'object_name': 'RecordingContext', 'db_table': "'recording_contexts'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.revision': {
'Meta': {'object_name': 'Revision', 'db_table': "'revisions'"},
'change_type': ('telemeta.models.core.CharField', [], {'max_length': '16'}),
'element_id': ('telemeta.models.core.IntegerField', [], {}),
'element_type': ('telemeta.models.core.CharField', [], {'max_length': '16'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time': ('telemeta.models.core.DateTimeField', [], {'default': 'None', 'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('telemeta.models.core.ForeignKey', [], {'related_name': "'revisions'", 'db_column': "'username'", 'to': "orm['auth.User']"})
},
'telemeta.rights': {
'Meta': {'ordering': "['value']", 'object_name': 'Rights', 'db_table': "'rights'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.search': {
'Meta': {'ordering': "['-date']", 'object_name': 'Search', 'db_table': "'searches'"},
'criteria': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'search'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['telemeta.Criteria']"}),
'date': ('telemeta.models.core.DateTimeField', [], {'default': 'None', 'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'username': ('telemeta.models.core.ForeignKey', [], {'related_name': "'searches'", 'db_column': "'username'", 'to': "orm['auth.User']"})
},
'telemeta.tapelength': {
'Meta': {'ordering': "['value']", 'object_name': 'TapeLength', 'db_table': "'tape_length'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.tapespeed': {
'Meta': {'ordering': "['value']", 'object_name': 'TapeSpeed', 'db_table': "'tape_speed'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.tapevendor': {
'Meta': {'ordering': "['value']", 'object_name': 'TapeVendor', 'db_table': "'tape_vendor'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.tapewheeldiameter': {
'Meta': {'ordering': "['value']", 'object_name': 'TapeWheelDiameter', 'db_table': "'tape_wheel_diameter'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.tapewidth': {
'Meta': {'ordering': "['value']", 'object_name': 'TapeWidth', 'db_table': "'tape_width'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.topic': {
'Meta': {'ordering': "['value']", 'object_name': 'Topic', 'db_table': "'topic'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'telemeta.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'profiles'"},
'address': ('telemeta.models.core.TextField', [], {'default': "''", 'blank': 'True'}),
'attachment': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'department': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'expiration_date': ('telemeta.models.core.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'function': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institution': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'telephone': ('telemeta.models.core.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'user': ('telemeta.models.core.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'telemeta.vernacularstyle': {
'Meta': {'ordering': "['value']", 'object_name': 'VernacularStyle', 'db_table': "'vernacular_styles'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('telemeta.models.core.CharField', [], {'unique': 'True', 'max_length': '250'})
}
}
complete_apps = ['telemeta'] | agpl-3.0 |
vprime/puuuu | env/lib/python2.7/site-packages/pip/_vendor/html5lib/tokenizer.py | 1710 | 76929 | from __future__ import absolute_import, division, unicode_literals
try:
chr = unichr # flake8: noqa
except NameError:
pass
from collections import deque
from .constants import spaceCharacters
from .constants import entities
from .constants import asciiLetters, asciiUpper2Lower
from .constants import digits, hexDigits, EOF
from .constants import tokenTypes, tagTokenTypes
from .constants import replacementCharacters
from .inputstream import HTMLInputStream
from .trie import Trie
entitiesTrie = Trie(entities)
class HTMLTokenizer(object):
""" This class takes care of tokenizing HTML.
* self.currentToken
Holds the token that is currently being processed.
* self.state
Holds a reference to the method to be invoked... XXX
* self.stream
Points to HTMLInputStream object.
"""
    def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
                 lowercaseElementName=True, lowercaseAttrName=True, parser=None):
        """Create a tokenizer reading from *stream*.

        :param stream: raw byte/character stream handed to HTMLInputStream.
        :param encoding: optional encoding override passed to the input stream.
        :param parseMeta: forwarded to HTMLInputStream (meta-tag encoding scan).
        :param useChardet: forwarded to HTMLInputStream (chardet fallback).
        :param lowercaseElementName: normalise tag names to lower case on emit.
        :param lowercaseAttrName: normalise attribute names to lower case.
        :param parser: owning parser instance, kept for reference.
        """
        self.stream = HTMLInputStream(stream, encoding, parseMeta, useChardet)
        self.parser = parser

        # Perform case conversions?
        self.lowercaseElementName = lowercaseElementName
        self.lowercaseAttrName = lowercaseAttrName

        # Setup the initial tokenizer state.
        # NOTE: self.state is a bound state-method; calling it consumes input,
        # may append to self.tokenQueue, and returns False only at end of input.
        self.escapeFlag = False
        self.lastFourChars = []
        self.state = self.dataState
        self.escape = False

        # The current token being created
        self.currentToken = None
        super(HTMLTokenizer, self).__init__()

    def __iter__(self):
        """Drive the state machine lazily, yielding token dicts.

        Each call to the current state method pauses here; any errors the
        input stream accumulated are yielded first as ParseError tokens,
        then the tokens queued by that state transition.
        """
        self.tokenQueue = deque([])
        # Start processing. When EOF is reached self.state will return False
        # instead of True and the loop will terminate.
        while self.state():
            while self.stream.errors:
                yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)}
            while self.tokenQueue:
                yield self.tokenQueue.popleft()
    def consumeNumberEntity(self, isHex):
        """This function returns either U+FFFD or the character based on the
        decimal or hexadecimal representation. It also discards ";" if present.
        If not present self.tokenQueue.append({"type": tokenTypes["ParseError"]}) is invoked.
        """

        allowed = digits
        radix = 10
        if isHex:
            allowed = hexDigits
            radix = 16

        charStack = []

        # Consume all the characters that are in range while making sure we
        # don't hit an EOF.
        c = self.stream.char()
        while c in allowed and c is not EOF:
            charStack.append(c)
            c = self.stream.char()

        # Convert the set of characters consumed to an int.
        charAsInt = int("".join(charStack), radix)

        # Certain characters get replaced with others
        if charAsInt in replacementCharacters:
            char = replacementCharacters[charAsInt]
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "illegal-codepoint-for-numeric-entity",
                                    "datavars": {"charAsInt": charAsInt}})
        elif ((0xD800 <= charAsInt <= 0xDFFF) or
              (charAsInt > 0x10FFFF)):
            # Surrogate code points and values past U+10FFFF become U+FFFD.
            char = "\uFFFD"
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "illegal-codepoint-for-numeric-entity",
                                    "datavars": {"charAsInt": charAsInt}})
        else:
            # Valid code point, but certain control / non-character values
            # still warrant a parse error (they are emitted unchanged).
            # Should speed up this check somehow (e.g. move the set to a constant)
            if ((0x0001 <= charAsInt <= 0x0008) or
                (0x000E <= charAsInt <= 0x001F) or
                (0x007F <= charAsInt <= 0x009F) or
                (0xFDD0 <= charAsInt <= 0xFDEF) or
                charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE,
                                        0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
                                        0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE,
                                        0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE,
                                        0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE,
                                        0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE,
                                        0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
                                        0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE,
                                        0xFFFFF, 0x10FFFE, 0x10FFFF])):
                self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                        "data":
                                        "illegal-codepoint-for-numeric-entity",
                                        "datavars": {"charAsInt": charAsInt}})
            try:
                # Try/except needed as UCS-2 Python builds' unichar only works
                # within the BMP.
                char = chr(charAsInt)
            except ValueError:
                # Build the UTF-16 surrogate pair by hand for non-BMP points.
                v = charAsInt - 0x10000
                char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF))

        # Discard the ; if present. Otherwise, put it back on the queue and
        # invoke parseError on parser.
        if c != ";":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "numeric-entity-without-semicolon"})
            self.stream.unget(c)

        return char
    def consumeEntity(self, allowedChar=None, fromAttribute=False):
        """Consume a character reference ("&..." sequence) from the stream.

        The decoded text is either appended to the current attribute value
        (*fromAttribute*) or queued as a Characters/SpaceCharacters token.
        *allowedChar* is an extra character after "&" that suppresses entity
        processing (used for attribute-value delimiters).
        """
        # Initialise to the default output for when no entity is matched
        output = "&"

        charStack = [self.stream.char()]
        if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&")
                or (allowedChar is not None and allowedChar == charStack[0])):
            # Not an entity at all; put the character back and emit a bare "&".
            self.stream.unget(charStack[0])

        elif charStack[0] == "#":
            # Read the next character to see if it's hex or decimal
            hex = False
            charStack.append(self.stream.char())
            if charStack[-1] in ("x", "X"):
                hex = True
                charStack.append(self.stream.char())

            # charStack[-1] should be the first digit
            if (hex and charStack[-1] in hexDigits) \
                    or (not hex and charStack[-1] in digits):
                # At least one digit found, so consume the whole number
                self.stream.unget(charStack[-1])
                output = self.consumeNumberEntity(hex)
            else:
                # No digits found
                self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                        "data": "expected-numeric-entity"})
                self.stream.unget(charStack.pop())
                output = "&" + "".join(charStack)

        else:
            # At this point in the process might have named entity. Entities
            # are stored in the global variable "entities".
            #
            # Consume characters and compare to these to a substring of the
            # entity names in the list until the substring no longer matches.
            while (charStack[-1] is not EOF):
                if not entitiesTrie.has_keys_with_prefix("".join(charStack)):
                    break
                charStack.append(self.stream.char())

            # At this point we have a string that starts with some characters
            # that may match an entity
            # Try to find the longest entity the string will match to take care
            # of &noti for instance.
            try:
                entityName = entitiesTrie.longest_prefix("".join(charStack[:-1]))
                entityLength = len(entityName)
            except KeyError:
                entityName = None

            if entityName is not None:
                if entityName[-1] != ";":
                    self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                            "named-entity-without-semicolon"})
                # Legacy compatibility: inside an attribute, a semicolon-less
                # entity followed by an alphanumeric or "=" is left literal.
                if (entityName[-1] != ";" and fromAttribute and
                    (charStack[entityLength] in asciiLetters or
                     charStack[entityLength] in digits or
                     charStack[entityLength] == "=")):
                    self.stream.unget(charStack.pop())
                    output = "&" + "".join(charStack)
                else:
                    output = entities[entityName]
                    self.stream.unget(charStack.pop())
                    output += "".join(charStack[entityLength:])
            else:
                self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                        "expected-named-entity"})
                self.stream.unget(charStack.pop())
                output = "&" + "".join(charStack)

        if fromAttribute:
            # Append to the value of the attribute currently being built.
            self.currentToken["data"][-1][1] += output
        else:
            if output in spaceCharacters:
                tokenType = "SpaceCharacters"
            else:
                tokenType = "Characters"
            self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output})

    def processEntityInAttribute(self, allowedChar):
        """This method replaces the need for "entityInAttributeValueState".
        """
        self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
    def emitCurrentToken(self):
        """This method is a generic handler for emitting the tags. It also sets
        the state to "data" because that's what's needed after a token has been
        emitted.
        """
        token = self.currentToken
        # Add token to the queue to be yielded
        if (token["type"] in tagTokenTypes):
            if self.lowercaseElementName:
                token["name"] = token["name"].translate(asciiUpper2Lower)
            if token["type"] == tokenTypes["EndTag"]:
                # End tags may carry neither attributes nor a self-closing
                # flag; either is reported as a parse error.
                if token["data"]:
                    self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                            "data": "attributes-in-end-tag"})
                if token["selfClosing"]:
                    self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                            "data": "self-closing-flag-on-end-tag"})
        self.tokenQueue.append(token)
        self.state = self.dataState
    # Below are the various tokenizer states worked out.
    def dataState(self):
        """Default state between markup; emits character data and dispatches
        on "&" and "<". Returns False at EOF to stop tokenization."""
        data = self.stream.char()
        if data == "&":
            self.state = self.entityDataState
        elif data == "<":
            self.state = self.tagOpenState
        elif data == "\u0000":
            # U+0000 is a parse error here but is emitted unchanged.
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\u0000"})
        elif data is EOF:
            # Tokenization ends.
            return False
        elif data in spaceCharacters:
            # Directly after emitting a token you switch back to the "data
            # state". At that point spaceCharacters are important so they are
            # emitted separately.
            self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
                                    data + self.stream.charsUntil(spaceCharacters, True)})
            # No need to update lastFourChars here, since the first space will
            # have already been appended to lastFourChars and will have broken
            # any <!-- or --> sequences
        else:
            chars = self.stream.charsUntil(("&", "<", "\u0000"))
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                    data + chars})
        return True

    def entityDataState(self):
        """Character reference in data: consume one entity, return to data."""
        self.consumeEntity()
        self.state = self.dataState
        return True

    def rcdataState(self):
        """RCDATA state: like dataState, but U+0000 is emitted as U+FFFD and
        "<" goes to the RCDATA-specific less-than-sign state."""
        data = self.stream.char()
        if data == "&":
            self.state = self.characterReferenceInRcdata
        elif data == "<":
            self.state = self.rcdataLessThanSignState
        elif data == EOF:
            # Tokenization ends.
            return False
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\uFFFD"})
        elif data in spaceCharacters:
            # Directly after emitting a token you switch back to the "data
            # state". At that point spaceCharacters are important so they are
            # emitted separately.
            self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
                                    data + self.stream.charsUntil(spaceCharacters, True)})
            # No need to update lastFourChars here, since the first space will
            # have already been appended to lastFourChars and will have broken
            # any <!-- or --> sequences
        else:
            chars = self.stream.charsUntil(("&", "<", "\u0000"))
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                    data + chars})
        return True

    def characterReferenceInRcdata(self):
        """Character reference in RCDATA: consume entity, return to RCDATA."""
        self.consumeEntity()
        self.state = self.rcdataState
        return True
    def rawtextState(self):
        """RAWTEXT state: no entities; only "<" is special, NUL -> U+FFFD."""
        data = self.stream.char()
        if data == "<":
            self.state = self.rawtextLessThanSignState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\uFFFD"})
        elif data == EOF:
            # Tokenization ends.
            return False
        else:
            chars = self.stream.charsUntil(("<", "\u0000"))
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                    data + chars})
        return True

    def scriptDataState(self):
        """Script data state: like RAWTEXT but "<" enters the script-data
        less-than-sign state (which also handles <!-- escapes)."""
        data = self.stream.char()
        if data == "<":
            self.state = self.scriptDataLessThanSignState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\uFFFD"})
        elif data == EOF:
            # Tokenization ends.
            return False
        else:
            chars = self.stream.charsUntil(("<", "\u0000"))
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                    data + chars})
        return True

    def plaintextState(self):
        """PLAINTEXT state: everything to EOF is character data; no markup."""
        data = self.stream.char()
        if data == EOF:
            # Tokenization ends.
            return False
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\uFFFD"})
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                    data + self.stream.charsUntil("\u0000")})
        return True
    def tagOpenState(self):
        """Tag open state: just consumed "<"; decide between markup
        declaration, close tag, start tag, or literal text."""
        data = self.stream.char()
        if data == "!":
            self.state = self.markupDeclarationOpenState
        elif data == "/":
            self.state = self.closeTagOpenState
        elif data in asciiLetters:
            # Begin building a StartTag token; attributes accumulate in "data".
            self.currentToken = {"type": tokenTypes["StartTag"],
                                 "name": data, "data": [],
                                 "selfClosing": False,
                                 "selfClosingAcknowledged": False}
            self.state = self.tagNameState
        elif data == ">":
            # XXX In theory it could be something besides a tag name. But
            # do we really care?
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-tag-name-but-got-right-bracket"})
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"})
            self.state = self.dataState
        elif data == "?":
            # XXX In theory it could be something besides a tag name. But
            # do we really care?
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-tag-name-but-got-question-mark"})
            self.stream.unget(data)
            self.state = self.bogusCommentState
        else:
            # XXX
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-tag-name"})
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
            self.stream.unget(data)
            self.state = self.dataState
        return True

    def closeTagOpenState(self):
        """End tag open state: just consumed "</"."""
        data = self.stream.char()
        if data in asciiLetters:
            self.currentToken = {"type": tokenTypes["EndTag"], "name": data,
                                 "data": [], "selfClosing": False}
            self.state = self.tagNameState
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-closing-tag-but-got-right-bracket"})
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-closing-tag-but-got-eof"})
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
            self.state = self.dataState
        else:
            # XXX data can be _'_...
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-closing-tag-but-got-char",
                                    "datavars": {"data": data}})
            self.stream.unget(data)
            self.state = self.bogusCommentState
        return True

    def tagNameState(self):
        """Tag name state: accumulate the tag name one character at a time."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeAttributeNameState
        elif data == ">":
            self.emitCurrentToken()
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-tag-name"})
            self.state = self.dataState
        elif data == "/":
            self.state = self.selfClosingStartTagState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["name"] += "\uFFFD"
        else:
            self.currentToken["name"] += data
            # (Don't use charsUntil here, because tag names are
            # very short and it's faster to not do anything fancy)
        return True
    def rcdataLessThanSignState(self):
        """RCDATA "<": only "</" may start an end tag; anything else is text."""
        data = self.stream.char()
        if data == "/":
            self.temporaryBuffer = ""
            self.state = self.rcdataEndTagOpenState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
            self.stream.unget(data)
            self.state = self.rcdataState
        return True

    def rcdataEndTagOpenState(self):
        """RCDATA "</": a letter starts a potential end-tag name, buffered in
        self.temporaryBuffer; otherwise "</" is emitted as text."""
        data = self.stream.char()
        if data in asciiLetters:
            self.temporaryBuffer += data
            self.state = self.rcdataEndTagNameState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
            self.stream.unget(data)
            self.state = self.rcdataState
        return True

    def rcdataEndTagNameState(self):
        """RCDATA end tag name: the buffered name only closes the element if
        it matches the current open tag ("appropriate" end tag)."""
        appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
        data = self.stream.char()
        if data in spaceCharacters and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.beforeAttributeNameState
        elif data == "/" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.selfClosingStartTagState
        elif data == ">" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.emitCurrentToken()
            self.state = self.dataState
        elif data in asciiLetters:
            self.temporaryBuffer += data
        else:
            # Not an appropriate end tag: flush "</" + buffer as text.
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "</" + self.temporaryBuffer})
            self.stream.unget(data)
            self.state = self.rcdataState
        return True

    def rawtextLessThanSignState(self):
        """RAWTEXT "<": mirror of rcdataLessThanSignState for RAWTEXT."""
        data = self.stream.char()
        if data == "/":
            self.temporaryBuffer = ""
            self.state = self.rawtextEndTagOpenState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
            self.stream.unget(data)
            self.state = self.rawtextState
        return True

    def rawtextEndTagOpenState(self):
        """RAWTEXT "</": buffer a potential end-tag name or emit literally."""
        data = self.stream.char()
        if data in asciiLetters:
            self.temporaryBuffer += data
            self.state = self.rawtextEndTagNameState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
            self.stream.unget(data)
            self.state = self.rawtextState
        return True

    def rawtextEndTagNameState(self):
        """RAWTEXT end tag name: same "appropriate end tag" logic as RCDATA."""
        appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
        data = self.stream.char()
        if data in spaceCharacters and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.beforeAttributeNameState
        elif data == "/" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.selfClosingStartTagState
        elif data == ">" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.emitCurrentToken()
            self.state = self.dataState
        elif data in asciiLetters:
            self.temporaryBuffer += data
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "</" + self.temporaryBuffer})
            self.stream.unget(data)
            self.state = self.rawtextState
        return True
    def scriptDataLessThanSignState(self):
        """Script data "<": "</" may start an end tag, "<!" may start an
        escaped (comment-like) section; otherwise "<" is plain text."""
        data = self.stream.char()
        if data == "/":
            self.temporaryBuffer = ""
            self.state = self.scriptDataEndTagOpenState
        elif data == "!":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"})
            self.state = self.scriptDataEscapeStartState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
            self.stream.unget(data)
            self.state = self.scriptDataState
        return True

    def scriptDataEndTagOpenState(self):
        """Script data "</": buffer a potential end-tag name or emit as text."""
        data = self.stream.char()
        if data in asciiLetters:
            self.temporaryBuffer += data
            self.state = self.scriptDataEndTagNameState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
            self.stream.unget(data)
            self.state = self.scriptDataState
        return True

    def scriptDataEndTagNameState(self):
        """Script data end tag name: "appropriate end tag" logic, as in
        the RCDATA/RAWTEXT variants above."""
        appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
        data = self.stream.char()
        if data in spaceCharacters and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.beforeAttributeNameState
        elif data == "/" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.selfClosingStartTagState
        elif data == ">" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.emitCurrentToken()
            self.state = self.dataState
        elif data in asciiLetters:
            self.temporaryBuffer += data
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "</" + self.temporaryBuffer})
            self.stream.unget(data)
            self.state = self.scriptDataState
        return True

    def scriptDataEscapeStartState(self):
        """After "<!" in script data: a "-" moves toward the escaped state."""
        data = self.stream.char()
        if data == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
            self.state = self.scriptDataEscapeStartDashState
        else:
            self.stream.unget(data)
            self.state = self.scriptDataState
        return True

    def scriptDataEscapeStartDashState(self):
        """After "<!-" in script data: a second "-" enters the escaped
        dash-dash state (i.e. "<!--" has now been seen)."""
        data = self.stream.char()
        if data == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
            self.state = self.scriptDataEscapedDashDashState
        else:
            self.stream.unget(data)
            self.state = self.scriptDataState
        return True
def scriptDataEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
self.state = self.dataState
else:
chars = self.stream.charsUntil(("<", "-", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedDashDashState(self):
        """Script-data-escaped-dash-dash state: "--" has been seen; further
        "-" characters stay here, ">" closes the escape back to plain script
        data, anything else returns to the escaped state (NUL → U+FFFD).

        Fix: use ``is EOF`` instead of ``== EOF`` for consistency with the
        singleton comparisons elsewhere in this tokenizer (PEP 8).
        """
        data = self.stream.char()
        if data == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
        elif data == "<":
            self.state = self.scriptDataEscapedLessThanSignState
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
            self.state = self.scriptDataState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\uFFFD"})
            self.state = self.scriptDataEscapedState
        elif data is EOF:
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            self.state = self.scriptDataEscapedState
        return True
def scriptDataEscapedLessThanSignState(self):
        """Handle "<" seen in escaped script data: "/" may open an end tag,
        an ASCII letter may begin a double escape (e.g. "<script"), and
        anything else re-emits the "<" and stays in the escaped state."""
        data = self.stream.char()
        if data == "/":
            # Start collecting a potential end tag name.
            self.temporaryBuffer = ""
            self.state = self.scriptDataEscapedEndTagOpenState
        elif data in asciiLetters:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data})
            self.temporaryBuffer = data
            self.state = self.scriptDataDoubleEscapeStartState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
            self.stream.unget(data)
            self.state = self.scriptDataEscapedState
        return True
def scriptDataEscapedEndTagOpenState(self):
        """After "</" in escaped script data: an ASCII letter starts an end
        tag name (buffered for the "appropriate end tag" check); anything
        else re-emits "</" literally and returns to the escaped state."""
        char = self.stream.char()
        if char not in asciiLetters:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
            self.stream.unget(char)
            self.state = self.scriptDataEscapedState
        else:
            self.temporaryBuffer = char
            self.state = self.scriptDataEscapedEndTagNameState
        return True
def scriptDataEscapedEndTagNameState(self):
        """Collect an end tag name inside escaped script data.  The buffered
        name only produces a real EndTag token when it is "appropriate",
        i.e. it case-insensitively matches the current open tag's name;
        otherwise the whole "</name" run is re-emitted as characters."""
        # True when the buffered name matches the last emitted start tag.
        appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
        data = self.stream.char()
        if data in spaceCharacters and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.beforeAttributeNameState
        elif data == "/" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.selfClosingStartTagState
        elif data == ">" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.emitCurrentToken()
            self.state = self.dataState
        elif data in asciiLetters:
            # Keep accumulating the candidate tag name.
            self.temporaryBuffer += data
        else:
            # Not an appropriate end tag: emit the text literally.
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "</" + self.temporaryBuffer})
            self.stream.unget(data)
            self.state = self.scriptDataEscapedState
        return True
def scriptDataDoubleEscapeStartState(self):
        """Decide whether "<tagname" inside escaped script data opens a
        double escape: when the buffered name is "script" (case-insensitive)
        and is terminated by whitespace, "/" or ">", switch to the
        double-escaped state; otherwise stay escaped."""
        data = self.stream.char()
        if data in (spaceCharacters | frozenset(("/", ">"))):
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            if self.temporaryBuffer.lower() == "script":
                self.state = self.scriptDataDoubleEscapedState
            else:
                self.state = self.scriptDataEscapedState
        elif data in asciiLetters:
            # Letters are emitted as-is while extending the buffered name.
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            self.temporaryBuffer += data
        else:
            self.stream.unget(data)
            self.state = self.scriptDataEscapedState
        return True
def scriptDataDoubleEscapedState(self):
        """Script-data-double-escaped state: everything is emitted as
        characters (NUL → U+FFFD with a parse error); "-" and "<" divert to
        the corresponding dash / less-than-sign sub-states, and EOF is a
        parse error that returns to the data state.

        Fix: use ``is EOF`` instead of ``== EOF`` for consistency with the
        singleton comparisons elsewhere in this tokenizer (PEP 8).
        """
        data = self.stream.char()
        if data == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
            self.state = self.scriptDataDoubleEscapedDashState
        elif data == "<":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
            self.state = self.scriptDataDoubleEscapedLessThanSignState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\uFFFD"})
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-script-in-script"})
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
        return True
def scriptDataDoubleEscapedDashState(self):
        """Script-data-double-escaped-dash state: one "-" seen; a second one
        moves to the dash-dash state, otherwise emit the character and drop
        back to the double-escaped state (NUL → U+FFFD with a parse error).

        Fix: use ``is EOF`` instead of ``== EOF`` for consistency with the
        singleton comparisons elsewhere in this tokenizer (PEP 8).
        """
        data = self.stream.char()
        if data == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
            self.state = self.scriptDataDoubleEscapedDashDashState
        elif data == "<":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
            self.state = self.scriptDataDoubleEscapedLessThanSignState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\uFFFD"})
            self.state = self.scriptDataDoubleEscapedState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-script-in-script"})
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            self.state = self.scriptDataDoubleEscapedState
        return True
def scriptDataDoubleEscapedDashDashState(self):
        """Script-data-double-escaped-dash-dash state: "--" seen; more "-"
        stay here, ">" closes the double escape back to plain script data,
        anything else returns to the double-escaped state (NUL → U+FFFD).

        Fix: use ``is EOF`` instead of ``== EOF`` for consistency with the
        singleton comparisons elsewhere in this tokenizer (PEP 8).
        """
        data = self.stream.char()
        if data == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
        elif data == "<":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
            self.state = self.scriptDataDoubleEscapedLessThanSignState
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
            self.state = self.scriptDataState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\uFFFD"})
            self.state = self.scriptDataDoubleEscapedState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-script-in-script"})
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            self.state = self.scriptDataDoubleEscapedState
        return True
def scriptDataDoubleEscapedLessThanSignState(self):
        """After "<" in double-escaped script data: "/" is emitted and may
        begin the double-escape-end sequence ("</script"); anything else is
        ungot and reprocessed in the double-escaped state."""
        char = self.stream.char()
        if char != "/":
            self.stream.unget(char)
            self.state = self.scriptDataDoubleEscapedState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"})
            self.temporaryBuffer = ""
            self.state = self.scriptDataDoubleEscapeEndState
        return True
def scriptDataDoubleEscapeEndState(self):
        """Mirror of scriptDataDoubleEscapeStartState for leaving the double
        escape: when the buffered name is "script" terminated by whitespace,
        "/" or ">", drop back to the (single) escaped state; otherwise stay
        double-escaped."""
        data = self.stream.char()
        if data in (spaceCharacters | frozenset(("/", ">"))):
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            if self.temporaryBuffer.lower() == "script":
                self.state = self.scriptDataEscapedState
            else:
                self.state = self.scriptDataDoubleEscapedState
        elif data in asciiLetters:
            # Letters are emitted as-is while extending the buffered name.
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            self.temporaryBuffer += data
        else:
            self.stream.unget(data)
            self.state = self.scriptDataDoubleEscapedState
        return True
def beforeAttributeNameState(self):
        """Before-attribute-name state: skip whitespace, start a new
        [name, value] attribute pair on the current token for most
        characters, emit the token on ">", and report parse errors for
        the characters the spec forbids here ("'", '"', "=", "<")."""
        data = self.stream.char()
        if data in spaceCharacters:
            # Consume the whole whitespace run in one call.
            self.stream.charsUntil(spaceCharacters, True)
        elif data in asciiLetters:
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        elif data == ">":
            self.emitCurrentToken()
        elif data == "/":
            self.state = self.selfClosingStartTagState
        elif data in ("'", '"', "=", "<"):
            # Parse error, but the character still starts an attribute name.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "invalid-character-in-attribute-name"})
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"].append(["\uFFFD", ""])
            self.state = self.attributeNameState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-attribute-name-but-got-eof"})
            self.state = self.dataState
        else:
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        return True
def attributeNameState(self):
        """Attribute-name state: accumulate the current attribute's name on
        the last [name, value] pair of ``currentToken["data"]``.

        On leaving the state, the name is optionally lowercased and checked
        against earlier attributes for duplicates (a parse error); the
        ``emitToken`` flag defers token emission until after that check --
        see the XXX comments below for why.
        """
        data = self.stream.char()
        leavingThisState = True
        emitToken = False
        if data == "=":
            self.state = self.beforeAttributeValueState
        elif data in asciiLetters:
            # Consume the rest of the letter run in one charsUntil call.
            self.currentToken["data"][-1][0] += data +\
                self.stream.charsUntil(asciiLetters, True)
            leavingThisState = False
        elif data == ">":
            # XXX If we emit here the attributes are converted to a dict
            # without being checked and when the code below runs we error
            # because data is a dict not a list
            emitToken = True
        elif data in spaceCharacters:
            self.state = self.afterAttributeNameState
        elif data == "/":
            self.state = self.selfClosingStartTagState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"][-1][0] += "\uFFFD"
            leavingThisState = False
        elif data in ("'", '"', "<"):
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data":
                                    "invalid-character-in-attribute-name"})
            self.currentToken["data"][-1][0] += data
            leavingThisState = False
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-attribute-name"})
            self.state = self.dataState
        else:
            self.currentToken["data"][-1][0] += data
            leavingThisState = False
        if leavingThisState:
            # Attributes are not dropped at this stage. That happens when the
            # start tag token is emitted so values can still be safely appended
            # to attributes, but we do want to report the parse error in time.
            if self.lowercaseAttrName:
                self.currentToken["data"][-1][0] = (
                    self.currentToken["data"][-1][0].translate(asciiUpper2Lower))
            for name, value in self.currentToken["data"][:-1]:
                if self.currentToken["data"][-1][0] == name:
                    self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                            "duplicate-attribute"})
                    break
            # XXX Fix for above XXX
            if emitToken:
                self.emitCurrentToken()
        return True
def afterAttributeNameState(self):
        """After-attribute-name state: whitespace is skipped, "=" begins a
        value, ">" emits the token, and most other characters start a brand
        new attribute (with parse errors for "'", '"' and "<")."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.stream.charsUntil(spaceCharacters, True)
        elif data == "=":
            self.state = self.beforeAttributeValueState
        elif data == ">":
            self.emitCurrentToken()
        elif data in asciiLetters:
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        elif data == "/":
            self.state = self.selfClosingStartTagState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"].append(["\uFFFD", ""])
            self.state = self.attributeNameState
        elif data in ("'", '"', "<"):
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "invalid-character-after-attribute-name"})
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-end-of-tag-but-got-eof"})
            self.state = self.dataState
        else:
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        return True
def beforeAttributeValueState(self):
        """Before-attribute-value state: dispatch on the quoting style of the
        upcoming value (double-quoted, single-quoted, or unquoted), emitting
        parse errors for ">" with no value and for the characters the spec
        flags in unquoted values ("=", "<", "`")."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.stream.charsUntil(spaceCharacters, True)
        elif data == "\"":
            self.state = self.attributeValueDoubleQuotedState
        elif data == "&":
            # "&" belongs to the unquoted value; unget so that state sees it.
            self.state = self.attributeValueUnQuotedState
            self.stream.unget(data)
        elif data == "'":
            self.state = self.attributeValueSingleQuotedState
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-attribute-value-but-got-right-bracket"})
            self.emitCurrentToken()
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"][-1][1] += "\uFFFD"
            self.state = self.attributeValueUnQuotedState
        elif data in ("=", "<", "`"):
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "equals-in-unquoted-attribute-value"})
            self.currentToken["data"][-1][1] += data
            self.state = self.attributeValueUnQuotedState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-attribute-value-but-got-eof"})
            self.state = self.dataState
        else:
            self.currentToken["data"][-1][1] += data
            self.state = self.attributeValueUnQuotedState
        return True
def attributeValueDoubleQuotedState(self):
        """Double-quoted attribute value: append characters to the current
        attribute's value until the closing '"'; "&" triggers character
        reference processing and NUL is replaced by U+FFFD."""
        data = self.stream.char()
        if data == "\"":
            self.state = self.afterAttributeValueState
        elif data == "&":
            # allowedChar is the closing quote for this quoting style.
            self.processEntityInAttribute('"')
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"][-1][1] += "\uFFFD"
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-attribute-value-double-quote"})
            self.state = self.dataState
        else:
            # Batch the run of ordinary characters in one charsUntil call.
            self.currentToken["data"][-1][1] += data +\
                self.stream.charsUntil(("\"", "&", "\u0000"))
        return True
def attributeValueSingleQuotedState(self):
        """Single-quoted attribute value: append characters to the current
        attribute's value until the closing "'"; "&" triggers character
        reference processing and NUL is replaced by U+FFFD."""
        char = self.stream.char()
        if char is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-attribute-value-single-quote"})
            self.state = self.dataState
        elif char == "'":
            self.state = self.afterAttributeValueState
        elif char == "&":
            self.processEntityInAttribute("'")
        elif char == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"][-1][1] += "\uFFFD"
        else:
            # Batch the run of ordinary characters in one charsUntil call.
            value = char + self.stream.charsUntil(("'", "&", "\u0000"))
            self.currentToken["data"][-1][1] += value
        return True
def attributeValueUnQuotedState(self):
        """Unquoted attribute value: whitespace terminates the value, ">"
        emits the token, and the spec's "unexpected" characters ('"', "'",
        "=", "<", "`") are kept but reported as parse errors."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeAttributeNameState
        elif data == "&":
            # ">" is the additional allowed character for unquoted values.
            self.processEntityInAttribute(">")
        elif data == ">":
            self.emitCurrentToken()
        elif data in ('"', "'", "=", "<", "`"):
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-character-in-unquoted-attribute-value"})
            self.currentToken["data"][-1][1] += data
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"][-1][1] += "\uFFFD"
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-attribute-value-no-quotes"})
            self.state = self.dataState
        else:
            # Batch up ordinary characters until any terminator/special char.
            self.currentToken["data"][-1][1] += data + self.stream.charsUntil(
                frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters)
        return True
def afterAttributeValueState(self):
        """After a quoted attribute value: only whitespace, ">" or "/" are
        valid; anything else is a parse error and is reprocessed in the
        before-attribute-name state."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeAttributeNameState
        elif data == ">":
            self.emitCurrentToken()
        elif data == "/":
            self.state = self.selfClosingStartTagState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-EOF-after-attribute-value"})
            self.stream.unget(data)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-character-after-attribute-value"})
            self.stream.unget(data)
            self.state = self.beforeAttributeNameState
        return True
def selfClosingStartTagState(self):
        """After "/" in a tag: ">" marks the current token self-closing and
        emits it; any other character is a parse error and is reprocessed in
        the before-attribute-name state."""
        data = self.stream.char()
        if data == ">":
            self.currentToken["selfClosing"] = True
            self.emitCurrentToken()
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data":
                                    "unexpected-EOF-after-solidus-in-tag"})
            self.stream.unget(data)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-character-after-solidus-in-tag"})
            self.stream.unget(data)
            self.state = self.beforeAttributeNameState
        return True
def bogusCommentState(self):
        """Swallow everything up to (but excluding) the next ">" -- or to
        EOF -- and emit it as a single Comment token with NUL mapped to
        U+FFFD, then consume the terminator and return to the data state."""
        comment = self.stream.charsUntil(">").replace("\u0000", "\uFFFD")
        self.tokenQueue.append(
            {"type": tokenTypes["Comment"], "data": comment})
        # Consume the ">" that stopped charsUntil (a no-op at EOF).
        self.stream.char()
        self.state = self.dataState
        return True
def markupDeclarationOpenState(self):
        """After "<!": look ahead for "--" (comment), a case-insensitive
        "DOCTYPE", or "[CDATA[" (only in foreign content).  Every character
        read during a failed match is pushed back onto the stream in reverse
        order before falling through to the bogus-comment state."""
        charStack = [self.stream.char()]
        if charStack[-1] == "-":
            charStack.append(self.stream.char())
            if charStack[-1] == "-":
                self.currentToken = {"type": tokenTypes["Comment"], "data": ""}
                self.state = self.commentStartState
                return True
        elif charStack[-1] in ('d', 'D'):
            # Case-insensitive match of the remaining "OCTYPE" letters.
            matched = True
            for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'),
                             ('y', 'Y'), ('p', 'P'), ('e', 'E')):
                charStack.append(self.stream.char())
                if charStack[-1] not in expected:
                    matched = False
                    break
            if matched:
                self.currentToken = {"type": tokenTypes["Doctype"],
                                     "name": "",
                                     "publicId": None, "systemId": None,
                                     "correct": True}
                self.state = self.doctypeState
                return True
        elif (charStack[-1] == "[" and
              self.parser is not None and
              self.parser.tree.openElements and
              self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace):
            # "[CDATA[" is only recognised inside foreign (non-HTML) content.
            matched = True
            for expected in ["C", "D", "A", "T", "A", "["]:
                charStack.append(self.stream.char())
                if charStack[-1] != expected:
                    matched = False
                    break
            if matched:
                self.state = self.cdataSectionState
                return True
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-dashes-or-doctype"})
        # Push back everything we consumed so bogusCommentState sees it all.
        while charStack:
            self.stream.unget(charStack.pop())
        self.state = self.bogusCommentState
        return True
def commentStartState(self):
        """Comment-start state (just after "<!--"): "-" may begin an
        immediate close, ">" is an "incorrect comment" that emits an empty
        comment, and other characters begin the comment data."""
        data = self.stream.char()
        if data == "-":
            self.state = self.commentStartDashState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "\uFFFD"
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "incorrect-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["data"] += data
            self.state = self.commentState
        return True
def commentStartDashState(self):
        """Comment-start-dash state ("<!--" then one "-"): a second "-"
        moves to the comment-end state; otherwise the pending "-" becomes
        part of the comment data."""
        data = self.stream.char()
        if data == "-":
            self.state = self.commentEndState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            # The pending "-" is flushed into the data along with U+FFFD.
            self.currentToken["data"] += "-\uFFFD"
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "incorrect-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["data"] += "-" + data
            self.state = self.commentState
        return True
def commentState(self):
        """Comment state: accumulate comment data until a "-" (possible
        close) or EOF, replacing NUL with U+FFFD."""
        data = self.stream.char()
        if data == "-":
            self.state = self.commentEndDashState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "\uFFFD"
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            # Batch the run of ordinary characters in one charsUntil call.
            self.currentToken["data"] += data + \
                self.stream.charsUntil(("-", "\u0000"))
        return True
def commentEndDashState(self):
        """Comment-end-dash state (one "-" seen inside a comment): a second
        "-" moves to the comment-end state; otherwise the pending "-" joins
        the comment data and we return to the comment state."""
        data = self.stream.char()
        if data == "-":
            self.state = self.commentEndState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "-\uFFFD"
            self.state = self.commentState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment-end-dash"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["data"] += "-" + data
            self.state = self.commentState
        return True
def commentEndState(self):
        """Comment-end state ("--" seen): ">" emits the comment; "!", extra
        "-", NUL and anything else are parse errors that either extend the
        comment data (flushing the pending "--") or divert to the
        comment-end-bang state."""
        data = self.stream.char()
        if data == ">":
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "--\uFFFD"
            self.state = self.commentState
        elif data == "!":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-bang-after-double-dash-in-comment"})
            self.state = self.commentEndBangState
        elif data == "-":
            # A run of dashes: each extra "-" stays in this state while one
            # dash at a time is appended to the comment data.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-dash-after-double-dash-in-comment"})
            self.currentToken["data"] += data
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment-double-dash"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            # The "--" did not close the comment: flush it into the data and
            # continue in the comment state.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-comment"})
            self.currentToken["data"] += "--" + data
            self.state = self.commentState
        return True
def commentEndBangState(self):
        """Comment-end-bang state ("--!" seen): ">" still closes the
        comment; otherwise the literal "--!" is flushed into the comment
        data before continuing."""
        data = self.stream.char()
        if data == ">":
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data == "-":
            self.currentToken["data"] += "--!"
            self.state = self.commentEndDashState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "--!\uFFFD"
            self.state = self.commentState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment-end-bang-state"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["data"] += "--!" + data
            self.state = self.commentState
        return True
def doctypeState(self):
        """Doctype state (just after "<!DOCTYPE"): whitespace is required
        before the name; anything else is a parse error but is still
        reprocessed in the before-doctype-name state."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeDoctypeNameState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-doctype-name-but-got-eof"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "need-space-after-doctype"})
            self.stream.unget(data)
            self.state = self.beforeDoctypeNameState
        return True
def beforeDoctypeNameState(self):
        """Before-doctype-name state: skip whitespace, then start the
        doctype name with the first real character (NUL → U+FFFD); ">" or
        EOF here produce a "correct": False doctype."""
        data = self.stream.char()
        if data in spaceCharacters:
            pass
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-doctype-name-but-got-right-bracket"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["name"] = "\uFFFD"
            self.state = self.doctypeNameState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-doctype-name-but-got-eof"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["name"] = data
            self.state = self.doctypeNameState
        return True
def doctypeNameState(self):
        """Doctype-name state: accumulate the name; on whitespace, ">" or
        EOF the accumulated name is lowercased (ASCII only) before moving
        on or emitting the token."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
            self.state = self.afterDoctypeNameState
        elif data == ">":
            self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["name"] += "\uFFFD"
            self.state = self.doctypeNameState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype-name"})
            self.currentToken["correct"] = False
            self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["name"] += data
        return True
def afterDoctypeNameState(self):
        """After-doctype-name state: look ahead for a case-insensitive
        "PUBLIC" or "SYSTEM" keyword; any other content is a parse error
        that marks the doctype incorrect and enters the bogus-doctype
        state."""
        data = self.stream.char()
        if data in spaceCharacters:
            pass
        elif data == ">":
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.currentToken["correct"] = False
            self.stream.unget(data)
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            if data in ("p", "P"):
                # Case-insensitive match of the remaining "UBLIC" letters.
                matched = True
                for expected in (("u", "U"), ("b", "B"), ("l", "L"),
                                 ("i", "I"), ("c", "C")):
                    data = self.stream.char()
                    if data not in expected:
                        matched = False
                        break
                if matched:
                    self.state = self.afterDoctypePublicKeywordState
                    return True
            elif data in ("s", "S"):
                # Case-insensitive match of the remaining "YSTEM" letters.
                matched = True
                for expected in (("y", "Y"), ("s", "S"), ("t", "T"),
                                 ("e", "E"), ("m", "M")):
                    data = self.stream.char()
                    if data not in expected:
                        matched = False
                        break
                if matched:
                    self.state = self.afterDoctypeSystemKeywordState
                    return True
            # All the characters read before the current 'data' will be
            # [a-zA-Z], so they're garbage in the bogus doctype and can be
            # discarded; only the latest character might be '>' or EOF
            # and needs to be ungetted
            self.stream.unget(data)
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-space-or-right-bracket-in-doctype", "datavars":
                                    {"data": data}})
            self.currentToken["correct"] = False
            self.state = self.bogusDoctypeState
        return True
def afterDoctypePublicKeywordState(self):
        """After the PUBLIC keyword: whitespace is expected; a quote here is
        a parse error but is reprocessed in the before-public-identifier
        state, as is any other character."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeDoctypePublicIdentifierState
        elif data in ("'", '"'):
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.stream.unget(data)
            self.state = self.beforeDoctypePublicIdentifierState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.stream.unget(data)
            self.state = self.beforeDoctypePublicIdentifierState
        return True
def beforeDoctypePublicIdentifierState(self):
        """Before the public identifier: skip whitespace and dispatch on the
        quote style; ">", EOF or any other character marks the doctype
        incorrect."""
        data = self.stream.char()
        if data in spaceCharacters:
            pass
        elif data == "\"":
            self.currentToken["publicId"] = ""
            self.state = self.doctypePublicIdentifierDoubleQuotedState
        elif data == "'":
            self.currentToken["publicId"] = ""
            self.state = self.doctypePublicIdentifierSingleQuotedState
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-end-of-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.currentToken["correct"] = False
            self.state = self.bogusDoctypeState
        return True
def doctypePublicIdentifierDoubleQuotedState(self):
        """Double-quoted public identifier: accumulate characters until the
        closing '"'; NUL → U+FFFD, and ">"/EOF abort with an incorrect
        doctype."""
        data = self.stream.char()
        if data == "\"":
            self.state = self.afterDoctypePublicIdentifierState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["publicId"] += "\uFFFD"
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-end-of-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["publicId"] += data
        return True
def doctypePublicIdentifierSingleQuotedState(self):
        """Single-quoted public identifier: accumulate characters until the
        closing "'"; NUL → U+FFFD, and ">"/EOF abort with an incorrect
        doctype."""
        char = self.stream.char()
        if char == "'":
            self.state = self.afterDoctypePublicIdentifierState
        elif char == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["publicId"] += "\uFFFD"
        elif char == ">" or char is EOF:
            # Both terminate the doctype early with the matching parse error.
            error = "unexpected-end-of-doctype" if char == ">" else "eof-in-doctype"
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    error})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["publicId"] += char
        return True
def afterDoctypePublicIdentifierState(self):
        """After the public identifier: whitespace leads toward an optional
        system identifier; a quote with no separating space is a parse
        error but still begins the system identifier."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.betweenDoctypePublicAndSystemIdentifiersState
        elif data == ">":
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data == '"':
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.currentToken["systemId"] = ""
            self.state = self.doctypeSystemIdentifierDoubleQuotedState
        elif data == "'":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.currentToken["systemId"] = ""
            self.state = self.doctypeSystemIdentifierSingleQuotedState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.currentToken["correct"] = False
            self.state = self.bogusDoctypeState
        return True
def betweenDoctypePublicAndSystemIdentifiersState(self):
        """Between the public and system identifiers: skip whitespace, ">"
        emits the doctype, a quote begins the system identifier, anything
        else marks the doctype incorrect.

        Fix: compare against the EOF sentinel with ``is`` rather than ``==``,
        matching every neighbouring doctype state in this tokenizer
        (PEP 8: comparisons to singletons use ``is``).
        """
        data = self.stream.char()
        if data in spaceCharacters:
            pass
        elif data == ">":
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data == '"':
            self.currentToken["systemId"] = ""
            self.state = self.doctypeSystemIdentifierDoubleQuotedState
        elif data == "'":
            self.currentToken["systemId"] = ""
            self.state = self.doctypeSystemIdentifierSingleQuotedState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.currentToken["correct"] = False
            self.state = self.bogusDoctypeState
        return True
def afterDoctypeSystemKeywordState(self):
        """After the SYSTEM keyword: whitespace is expected; a quote here is
        a parse error but is reprocessed in the before-system-identifier
        state, as is any other character."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeDoctypeSystemIdentifierState
        elif data in ("'", '"'):
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.stream.unget(data)
            self.state = self.beforeDoctypeSystemIdentifierState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.stream.unget(data)
            self.state = self.beforeDoctypeSystemIdentifierState
        return True
def beforeDoctypeSystemIdentifierState(self):
        """Before the system identifier: skip whitespace and dispatch on the
        quote style; ">", EOF or any other character marks the doctype
        incorrect."""
        data = self.stream.char()
        if data in spaceCharacters:
            pass
        elif data == "\"":
            self.currentToken["systemId"] = ""
            self.state = self.doctypeSystemIdentifierDoubleQuotedState
        elif data == "'":
            self.currentToken["systemId"] = ""
            self.state = self.doctypeSystemIdentifierSingleQuotedState
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.currentToken["correct"] = False
            self.state = self.bogusDoctypeState
        return True
def doctypeSystemIdentifierDoubleQuotedState(self):
        """Double-quoted system identifier: accumulate characters until the
        closing '"'; NUL → U+FFFD, and ">"/EOF abort with an incorrect
        doctype."""
        data = self.stream.char()
        if data == "\"":
            self.state = self.afterDoctypeSystemIdentifierState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["systemId"] += "\uFFFD"
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-end-of-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["systemId"] += data
        return True
def doctypeSystemIdentifierSingleQuotedState(self):
    """Accumulate a single-quoted DOCTYPE system identifier."""
    char = self.stream.char()
    if char == "'":
        # Closing quote: identifier complete.
        self.state = self.afterDoctypeSystemIdentifierState
    elif char == ">":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "unexpected-end-of-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif char is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif char == "\u0000":
        # NULL is a parse error; substitute U+FFFD.
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["systemId"] += "\uFFFD"
    else:
        self.currentToken["systemId"] += char
    return True
def afterDoctypeSystemIdentifierState(self):
    """Handle characters after a completed DOCTYPE system identifier."""
    char = self.stream.char()
    if char in spaceCharacters:
        return True
    if char == ">":
        # Normal completion: emit the DOCTYPE and resume tokenizing data.
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif char is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        # NOTE: unlike earlier DOCTYPE states, the token is NOT marked
        # incorrect here before entering the bogus state.
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "unexpected-char-in-doctype"})
        self.state = self.bogusDoctypeState
    return True
def bogusDoctypeState(self):
    """Discard everything up to '>' (or EOF), then emit the DOCTYPE token."""
    char = self.stream.char()
    if char == ">" or char is EOF:
        if char is EOF:
            # XXX EMIT
            # Push EOF back so the data state sees it again.
            self.stream.unget(char)
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    # Every other character is silently discarded.
    return True
def cdataSectionState(self):
    """Consume a CDATA section and emit its contents as a Characters token.

    Reads until the "]]>" terminator (or EOF), strips the trailing "]]"
    marker, replaces NULs with U+FFFD (emitting one parse error per
    occurrence), and queues a single Characters token.
    """
    data = []
    while True:
        # charsUntil stops *before* the target character, so each pass
        # collects text up to the next "]" and then up to the next ">".
        data.append(self.stream.charsUntil("]"))
        data.append(self.stream.charsUntil(">"))
        char = self.stream.char()
        if char == EOF:
            break
        else:
            # char can only be ">" here because charsUntil(">") stopped on it.
            assert char == ">"
            if data[-1][-2:] == "]]":
                # The ">" was preceded by "]]": drop the marker and finish.
                data[-1] = data[-1][:-2]
                break
            else:
                # A ">" not preceded by "]]" is ordinary CDATA text.
                data.append(char)
    data = "".join(data)
    # Deal with null here rather than in the parser
    nullCount = data.count("\u0000")
    if nullCount > 0:
        # One parse error per NUL, then replace them all at once.
        for i in range(nullCount):
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
        data = data.replace("\u0000", "\uFFFD")
    if data:
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": data})
    self.state = self.dataState
    return True
| mit |
WillieMaddox/scipy | scipy/sparse/linalg/_onenormest.py | 96 | 15138 | """Sparse block 1-norm estimator.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.sparse.linalg import aslinearoperator
__all__ = ['onenormest']
def onenormest(A, t=2, itmax=5, compute_v=False, compute_w=False):
    """
    Compute a lower bound of the 1-norm of a sparse matrix.

    Parameters
    ----------
    A : ndarray or other linear operator
        A linear operator that can be transposed and that can
        produce matrix products.
    t : int, optional
        A positive parameter controlling the tradeoff between
        accuracy versus time and memory usage.
        Larger values take longer and use more memory
        but give more accurate output.
    itmax : int, optional
        Use at most this many iterations.
    compute_v : bool, optional
        Request a norm-maximizing linear operator input vector if True.
    compute_w : bool, optional
        Request a norm-maximizing linear operator output vector if True.

    Returns
    -------
    est : float
        An underestimate of the 1-norm of the sparse matrix.
    v : ndarray, optional
        The vector such that ||Av||_1 == est*||v||_1.
        It can be thought of as an input to the linear operator
        that gives an output with particularly large norm.
    w : ndarray, optional
        The vector Av which has relatively large 1-norm.
        It can be thought of as an output of the linear operator
        that is relatively large in norm compared to the input.

    Notes
    -----
    This is algorithm 2.4 of [1].

    In [2] it is described as follows.
    "This algorithm typically requires the evaluation of
    about 4t matrix-vector products and almost invariably
    produces a norm estimate (which is, in fact, a lower
    bound on the norm) correct to within a factor 3."

    .. versionadded:: 0.13.0

    References
    ----------
    .. [1] Nicholas J. Higham and Francoise Tisseur (2000),
           "A Block Algorithm for Matrix 1-Norm Estimation,
           with an Application to 1-Norm Pseudospectra."
           SIAM J. Matrix Anal. Appl. Vol. 21, No. 4, pp. 1185-1201.

    .. [2] Awad H. Al-Mohy and Nicholas J. Higham (2009),
           "A new scaling and squaring algorithm for the matrix exponential."
           SIAM J. Matrix Anal. Appl. Vol. 31, No. 3, pp. 970-989.
    """
    # Check the input.
    A = aslinearoperator(A)
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected the operator to act like a square matrix')

    # If the operator size is small compared to t,
    # then it is easier to compute the exact norm.
    # Otherwise estimate the norm.
    n = A.shape[1]
    if t >= n:
        A_explicit = np.asarray(aslinearoperator(A).matmat(np.identity(n)))
        if A_explicit.shape != (n, n):
            # Bug fix: pass Exception a single concatenated message.  The
            # original used a comma here, constructing the exception with
            # two positional args so it printed as a tuple, not a sentence.
            raise Exception('internal error: '
                            'unexpected shape ' + str(A_explicit.shape))
        col_abs_sums = abs(A_explicit).sum(axis=0)
        if col_abs_sums.shape != (n, ):
            # Same stray-comma fix as above.
            raise Exception('internal error: '
                            'unexpected shape ' + str(col_abs_sums.shape))
        # Exact 1-norm: the largest column sum of absolute values.
        argmax_j = np.argmax(col_abs_sums)
        v = elementary_vector(n, argmax_j)
        w = A_explicit[:, argmax_j]
        est = col_abs_sums[argmax_j]
    else:
        est, v, w, nmults, nresamples = _onenormest_core(A, A.H, t, itmax)

    # Report the norm estimate along with some certificates of the estimate.
    if compute_v or compute_w:
        result = (est,)
        if compute_v:
            result += (v,)
        if compute_w:
            result += (w,)
        return result
    else:
        return est
def _blocked_elementwise(func):
    """
    Decorator for an elementwise function, to apply it blockwise along
    first dimension, to avoid excessive memory usage in temporaries.
    """
    block_size = 2**20

    def wrapper(x):
        n = x.shape[0]
        if n < block_size:
            # Small input: a single direct call is cheapest.
            return func(x)
        # Evaluate the first block to discover the output dtype and
        # trailing shape, then fill the remaining blocks into a
        # preallocated result array.
        first = func(x[:block_size])
        out = np.zeros((n,) + first.shape[1:], dtype=first.dtype)
        out[:block_size] = first
        del first
        for start in range(block_size, n, block_size):
            out[start:start + block_size] = func(x[start:start + block_size])
        return out
    return wrapper
@_blocked_elementwise
def sign_round_up(X):
    """
    This should do the right thing for both real and complex matrices.

    From Higham and Tisseur:
    "Everything in this section remains valid for complex matrices
    provided that sign(A) is redefined as the matrix (aij / |aij|)
    (and sign(0) = 1) transposes are replaced by conjugate transposes."
    """
    # Map each entry a to a / |a|, defining sign(0) as 1.
    out = np.where(X == 0, 1, X)
    out /= np.abs(out)
    return out
@_blocked_elementwise
def _max_abs_axis1(X):
    # Row-wise maximum absolute value (evaluated blockwise by the decorator).
    return np.abs(X).max(axis=1)
def _sum_abs_axis0(X):
    """Column-wise sum of absolute values, accumulated block by block.

    Blocking keeps the np.abs temporary small for tall inputs.
    """
    block_size = 2**20
    total = None
    for start in range(0, X.shape[0], block_size):
        partial = np.abs(X[start:start + block_size]).sum(axis=0)
        total = partial if total is None else total + partial
    return total
def elementary_vector(n, i):
    """Return the i-th standard basis vector of length n (float dtype)."""
    basis = np.zeros(n, dtype=float)
    basis[i] = 1
    return basis
def vectors_are_parallel(v, w):
    """Detect "parallel" sign vectors by exact equality of entries.

    Entries are required to be in {-1, 1}, which guarantees that the
    magnitudes of the vectors are identical; under that assumption the
    dot product equals n exactly when every pair of entries matches.
    (NOTE(review): a negated column yields dot == -n and compares as
    not parallel here — confirm this matches the intended semantics.)
    """
    if v.ndim != 1 or v.shape != w.shape:
        raise ValueError('expected conformant vectors with entries in {-1,1}')
    return np.dot(v, w) == v.shape[0]
def every_col_of_X_is_parallel_to_a_col_of_Y(X, Y):
    """True iff each column of X is parallel to some column of Y."""
    return all(
        any(vectors_are_parallel(x_col, y_col) for y_col in Y.T)
        for x_col in X.T)
def column_needs_resampling(i, X, Y=None):
    """Column i of X needs resampling if it is parallel to an earlier
    column of X, or (when Y is given) to any column of Y."""
    n, t = X.shape
    col = X[:, i]
    if any(vectors_are_parallel(col, X[:, j]) for j in range(i)):
        return True
    return Y is not None and any(
        vectors_are_parallel(col, w) for w in Y.T)
def resample_column(i, X):
    """Overwrite column i of X in place with random entries from {-1, 1}."""
    X[:, i] = 2 * np.random.randint(0, 2, size=X.shape[0]) - 1
def less_than_or_close(a, b):
    """'a <= b' with a floating-point tolerance on the equality part."""
    if np.allclose(a, b):
        return True
    return a < b
def _algorithm_2_2(A, AT, t):
    """
    This is Algorithm 2.2.

    Parameters
    ----------
    A : ndarray or other linear operator
        A linear operator that can produce matrix products.
    AT : ndarray or other linear operator
        The transpose of A.
    t : int, optional
        A positive parameter controlling the tradeoff between
        accuracy versus time and memory usage.

    Returns
    -------
    g : sequence
        A non-negative decreasing vector
        such that g[j] is a lower bound for the 1-norm
        of the column of A of jth largest 1-norm.
        The first entry of this vector is therefore a lower bound
        on the 1-norm of the linear operator A.
        This sequence has length t.
    ind : sequence
        The ith entry of ind is the index of the column A whose 1-norm
        is given by g[i].
        This sequence of indices has length t, and its entries are
        chosen from range(n), possibly with repetition,
        where n is the order of the operator A.

    Notes
    -----
    This algorithm is mainly for testing.
    It uses the 'ind' array in a way that is similar to
    its usage in algorithm 2.4. This algorithm 2.2 may be easier to test,
    so it gives a chance of uncovering bugs related to indexing
    which could have propagated less noticeably to algorithm 2.4.
    """
    A_linear_operator = aslinearoperator(A)
    AT_linear_operator = aslinearoperator(AT)
    n = A_linear_operator.shape[0]

    # Initialize the X block with columns of unit 1-norm.
    X = np.ones((n, t))
    if t > 1:
        # Remaining columns drawn uniformly from {-1, 1}.
        X[:, 1:] = np.random.randint(0, 2, size=(n, t-1))*2 - 1
    X /= float(n)

    # Iteratively improve the lower bounds.
    # Track extra things, to assert invariants for debugging.
    g_prev = None
    h_prev = None
    k = 1
    ind = range(t)
    while True:
        Y = np.asarray(A_linear_operator.matmat(X))
        g = _sum_abs_axis0(Y)
        best_j = np.argmax(g)
        # Sort g into decreasing order.
        g.sort()
        g = g[::-1]
        S = sign_round_up(Y)
        Z = np.asarray(AT_linear_operator.matmat(S))
        h = _max_abs_axis1(Z)

        # If this algorithm runs for fewer than two iterations,
        # then its return values do not have the properties indicated
        # in the description of the algorithm.
        # In particular, the entries of g are not 1-norms of any
        # column of A until the second iteration.
        # Therefore we will require the algorithm to run for at least
        # two iterations, even though this requirement is not stated
        # in the description of the algorithm.
        if k >= 2:
            # Convergence test from the paper (with float tolerance).
            if less_than_or_close(max(h), np.dot(Z[:, best_j], X[:, best_j])):
                break
        # Pick the t most promising column indices for the next pass.
        ind = np.argsort(h)[::-1][:t]
        h = h[ind]
        for j in range(t):
            X[:, j] = elementary_vector(n, ind[j])

        # Check invariant (2.2).
        if k >= 2:
            if not less_than_or_close(g_prev[0], h_prev[0]):
                raise Exception('invariant (2.2) is violated')
            if not less_than_or_close(h_prev[0], g[0]):
                raise Exception('invariant (2.2) is violated')

        # Check invariant (2.3).
        if k >= 3:
            for j in range(t):
                if not less_than_or_close(g[j], g_prev[j]):
                    raise Exception('invariant (2.3) is violated')

        # Update for the next iteration.
        g_prev = g
        h_prev = h
        k += 1

    # Return the lower bounds and the corresponding column indices.
    return g, ind
def _onenormest_core(A, AT, t, itmax):
    """
    Compute a lower bound of the 1-norm of a sparse matrix.

    Parameters
    ----------
    A : ndarray or other linear operator
        A linear operator that can produce matrix products.
    AT : ndarray or other linear operator
        The transpose of A.
    t : int, optional
        A positive parameter controlling the tradeoff between
        accuracy versus time and memory usage.
    itmax : int, optional
        Use at most this many iterations.

    Returns
    -------
    est : float
        An underestimate of the 1-norm of the sparse matrix.
    v : ndarray, optional
        The vector such that ||Av||_1 == est*||v||_1.
        It can be thought of as an input to the linear operator
        that gives an output with particularly large norm.
    w : ndarray, optional
        The vector Av which has relatively large 1-norm.
        It can be thought of as an output of the linear operator
        that is relatively large in norm compared to the input.
    nmults : int, optional
        The number of matrix products that were computed.
    nresamples : int, optional
        The number of times a parallel column was observed,
        necessitating a re-randomization of the column.

    Notes
    -----
    This is algorithm 2.4.  The numbered comments (1)-(5) below label
    the corresponding steps of that algorithm.
    """
    # This function is a more or less direct translation
    # of Algorithm 2.4 from the Higham and Tisseur (2000) paper.
    A_linear_operator = aslinearoperator(A)
    AT_linear_operator = aslinearoperator(AT)
    if itmax < 2:
        raise ValueError('at least two iterations are required')
    if t < 1:
        raise ValueError('at least one column is required')
    n = A.shape[0]
    if t >= n:
        raise ValueError('t should be smaller than the order of A')
    # Track the number of big*small matrix multiplications
    # and the number of resamplings.
    nmults = 0
    nresamples = 0
    # "We now explain our choice of starting matrix. We take the first
    # column of X to be the vector of 1s [...] This has the advantage that
    # for a matrix with nonnegative elements the algorithm converges
    # with an exact estimate on the second iteration, and such matrices
    # arise in applications [...]"
    X = np.ones((n, t), dtype=float)
    # "The remaining columns are chosen as rand{-1,1},
    # with a check for and correction of parallel columns,
    # exactly as for S in the body of the algorithm."
    if t > 1:
        for i in range(1, t):
            # These are technically initial samples, not resamples,
            # so the resampling count is not incremented.
            resample_column(i, X)
        for i in range(t):
            while column_needs_resampling(i, X):
                resample_column(i, X)
                nresamples += 1
    # "Choose starting matrix X with columns of unit 1-norm."
    X /= float(n)
    # "indices of used unit vectors e_j"
    ind_hist = np.zeros(0, dtype=np.intp)
    est_old = 0
    S = np.zeros((n, t), dtype=float)
    k = 1
    # ind holds the column indices chosen on the previous iteration.
    ind = None
    while True:
        Y = np.asarray(A_linear_operator.matmat(X))
        nmults += 1
        mags = _sum_abs_axis0(Y)
        est = np.max(mags)
        best_j = np.argmax(mags)
        # w (and, from k >= 2 on, ind_best) are always assigned here
        # before any break below can be reached, so both are defined
        # at the final return / at step (4).
        if est > est_old or k == 2:
            if k >= 2:
                ind_best = ind[best_j]
            w = Y[:, best_j]
        # (1)
        if k >= 2 and est <= est_old:
            est = est_old
            break
        est_old = est
        S_old = S
        if k > itmax:
            break
        S = sign_round_up(Y)
        del Y
        # (2)
        if every_col_of_X_is_parallel_to_a_col_of_Y(S, S_old):
            break
        if t > 1:
            # "Ensure that no column of S is parallel to another column of S
            # or to a column of S_old by replacing columns of S by rand{-1,1}."
            for i in range(t):
                while column_needs_resampling(i, S, S_old):
                    resample_column(i, S)
                    nresamples += 1
        del S_old
        # (3)
        Z = np.asarray(AT_linear_operator.matmat(S))
        nmults += 1
        h = _max_abs_axis1(Z)
        del Z
        # (4)
        if k >= 2 and max(h) == h[ind_best]:
            break
        # "Sort h so that h_first >= ... >= h_last
        # and re-order ind correspondingly."
        #
        # Later on, we will need at most t+len(ind_hist) largest
        # entries, so drop the rest
        ind = np.argsort(h)[::-1][:t+len(ind_hist)].copy()
        del h
        if t > 1:
            # (5)
            # Break if the most promising t vectors have been visited already.
            if np.in1d(ind[:t], ind_hist).all():
                break
            # Put the most promising unvisited vectors at the front of the list
            # and put the visited vectors at the end of the list.
            # Preserve the order of the indices induced by the ordering of h.
            seen = np.in1d(ind, ind_hist)
            ind = np.concatenate((ind[~seen], ind[seen]))
        for j in range(t):
            X[:, j] = elementary_vector(n, ind[j])
        # Record which unit vectors have now been tried.
        new_ind = ind[:t][~np.in1d(ind[:t], ind_hist)]
        ind_hist = np.concatenate((ind_hist, new_ind))
        k += 1
    v = elementary_vector(n, ind_best)
    return est, v, w, nmults, nresamples
| bsd-3-clause |
wdwvt1/scikit-bio | skbio/stats/ordination/tests/test_ordination.py | 3 | 35844 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import six
from six import binary_type, text_type
import warnings
import unittest
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import numpy.testing as npt
import pandas as pd
from IPython.core.display import Image, SVG
from nose.tools import assert_is_instance, assert_true
from scipy.spatial.distance import pdist
from skbio import DistanceMatrix
from skbio.stats.ordination import (
CA, RDA, CCA, PCoA, OrdinationResults, corr, mean_and_std,
assert_ordination_results_equal)
from skbio.util import get_data_path
def normalize_signs(arr1, arr2):
    """Change column signs so that "column" and "-column" compare equal.

    Results of eigenproblems are only defined up to the sign of each
    column, so two correct answers may differ by a factor of -1 per
    column.  This helper returns ``(arr1', arr2)`` where columns of
    ``arr1'`` have been flipped (where needed) so that a subsequent
    np.allclose succeeds whenever the inputs agree up to column signs.

    Notes
    =====
    Two cases are distinguished for every column:

    - The column is essentially zero everywhere.  No flip is needed,
      because np.allclose(epsilon, 0) and np.allclose(-epsilon, 0)
      agree for |epsilon| small.
    - Otherwise the entry of largest magnitude in the arr1 column is
      compared, by sign, with the entry at the same position in arr2,
      and the whole column is flipped when the signs disagree.
    """
    # Work in float64 so the sign ratios below behave uniformly
    # (np.array(1) / np.array(0) differs from np.array(1.) / np.array(0.)).
    arr1 = np.asarray(arr1, dtype=np.float64)
    arr2 = np.asarray(arr2, dtype=np.float64)

    if arr1.shape != arr2.shape:
        raise ValueError(
            "Arrays must have the same shape ({0} vs {1}).".format(arr1.shape,
                                                                   arr2.shape)
            )

    # Anchor each column at the row where arr1 has its largest
    # magnitude; the sign there is least affected by near-zero noise.
    anchor_rows = np.abs(arr1).argmax(axis=0)
    cols = range(arr1.shape[1])
    sign1 = np.sign(arr1[anchor_rows, cols])
    sign2 = np.sign(arr2[anchor_rows, cols])

    # Silence 0/0 (invalid) and x/0 (divide) warnings while forming the
    # per-column sign ratio, then restore the previous error state.
    previous = np.seterr(invalid='ignore', divide='ignore')
    flips = sign1 / sign2
    np.seterr(**previous)

    # flips is +1 (same sign), -1 (opposite sign), or nan/inf/0 when an
    # anchor value was zero -- in those cases the column sign does not
    # matter, so leave the column untouched.
    indeterminate = (~np.isfinite(flips)) | (flips == 0)
    flips[indeterminate] = 1

    return arr1 * flips, arr2
def chi_square_distance(data_table, between_rows=True):
    """Compute chi-square distances between rows (or columns) of a table.

    The chi-square distance has no upper limit and excludes double
    zeros.

    Parameters
    ----------
    data_table : 2D array_like
        An array_like object of shape (n, p).  Must be a frequency
        table: all cells non-negative and summing to 1.
    between_rows : bool (defaults to True)
        If True compute distances between rows (default), otherwise
        between columns.

    Returns
    -------
    Y : ndarray
        Condensed distance matrix, laid out exactly as produced by
        `scipy.spatial.distance.pdist`.

    See Also
    --------
    scipy.spatial.distance.squareform

    References
    ----------
    This coefficient appears in Legendre and Legendre (1998) as
    formula 7.54 (as D_{16}).
    """
    table = np.asarray(data_table, dtype=np.float64)
    if not np.allclose(table.sum(), 1):
        raise ValueError("Input is not a frequency table: if it is an"
                         " abundance table you could scale it as"
                         " `data_table / data_table.sum()`.")
    if np.any(table < 0):
        raise ValueError("A frequency table can't have negative values.")

    # Distances are always computed between the rows of F, so transpose
    # first when column distances were requested.
    F = table if between_rows else table.T
    row_totals = F.sum(axis=1, keepdims=True)
    col_totals = F.sum(axis=0)
    # Scale so that the plain Euclidean metric on the scaled rows equals
    # the chi-square metric on the original table.
    scaled = F / (row_totals * np.sqrt(col_totals))
    return pdist(scaled, 'euclidean')
class TestNormalizeSigns(object):
    """Unit tests for the normalize_signs helper."""

    def test_shapes_and_nonarray_input(self):
        # Mismatched shapes must raise, even for plain nested lists.
        with npt.assert_raises(ValueError):
            normalize_signs([[1, 2], [3, 5]], [[1, 2]])

    def test_works_when_different(self):
        """Taking abs value of everything would lead to false
        positives."""
        a = np.array([[1, -1],
                      [2, 2]])
        b = np.array([[-1, -1],
                      [2, 2]])
        with npt.assert_raises(AssertionError):
            npt.assert_equal(*normalize_signs(a, b))

    def test_easy_different(self):
        a = np.array([[1, 2],
                      [3, -1]])
        b = np.array([[-1, 2],
                      [-3, -1]])
        npt.assert_equal(*normalize_signs(a, b))

    def test_easy_already_equal(self):
        a = np.array([[1, -2],
                      [3, 1]])
        b = a.copy()
        npt.assert_equal(*normalize_signs(a, b))

    def test_zeros(self):
        a = np.array([[0, 3],
                      [0, -1]])
        b = np.array([[0, -3],
                      [0, 1]])
        npt.assert_equal(*normalize_signs(a, b))

    def test_hard(self):
        a = np.array([[0, 1],
                      [1, 2]])
        b = np.array([[0, 1],
                      [-1, 2]])
        npt.assert_equal(*normalize_signs(a, b))

    def test_harder(self):
        """We don't want a value that might be negative due to
        floating point inaccuracies to make a call to allclose in the
        result to be off."""
        a = np.array([[-1e-15, 1],
                      [5, 2]])
        b = np.array([[1e-15, 1],
                      [5, 2]])
        # Clearly a and b would refer to the same "column
        # eigenvectors" but a slopppy implementation of
        # normalize_signs could change the sign of column 0 and make a
        # comparison fail
        npt.assert_almost_equal(*normalize_signs(a, b))

    def test_column_zeros(self):
        a = np.array([[0, 1],
                      [0, 2]])
        b = np.array([[0, -1],
                      [0, -2]])
        npt.assert_equal(*normalize_signs(a, b))

    def test_column_almost_zero(self):
        a = np.array([[1e-15, 3],
                      [-2e-14, -6]])
        b = np.array([[0, 3],
                      [-1e-15, -6]])
        npt.assert_almost_equal(*normalize_signs(a, b))
class TestChiSquareDistance(object):
    """Unit tests for the chi_square_distance helper."""

    def test_errors(self):
        # Negative entries are rejected.
        a = np.array([[-0.5, 0],
                      [1, 0.5]])
        with npt.assert_raises(ValueError):
            chi_square_distance(a)
        # Entries that do not sum to 1 (not a frequency table) are rejected.
        b = np.array([[0.5, 0],
                      [0.5, 0.1]])
        with npt.assert_raises(ValueError):
            chi_square_distance(b)

    def test_results(self):
        """Some random numbers."""
        a = np.array([[0.02808988764, 0.056179775281, 0.084269662921,
                       0.140449438202],
                      [0.01404494382, 0.196629213483, 0.109550561798,
                       0.033707865169],
                      [0.02808988764, 0.112359550562, 0.056179775281,
                       0.140449438202]])
        dist = chi_square_distance(a)
        expected = [0.91413919964333856,
                    0.33651110106124049,
                    0.75656884966269089]
        npt.assert_almost_equal(dist, expected)

    def test_results2(self):
        """A tiny example from Legendre & Legendre 1998, p. 285."""
        a = np.array([[0, 1, 1],
                      [1, 0, 0],
                      [0, 4, 4]])
        dist = chi_square_distance(a / a.sum())
        # Note L&L used a terrible calculator because they got a wrong
        # number (says it's 3.477) :(
        expected = [3.4785054261852175, 0, 3.4785054261852175]
        npt.assert_almost_equal(dist, expected)
class TestUtils(object):
    """Tests for corr, mean_and_std and assert_ordination_results_equal."""

    def setup(self):
        self.x = np.array([[1, 2, 3], [4, 5, 6]])
        self.y = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])

    def test_mean_and_std_no_mean_no_std(self):
        # Requesting neither centering nor scaling is an error.
        with npt.assert_raises(ValueError):
            mean_and_std(self.x, with_mean=False, with_std=False)

    def test_corr_shape_mismatch(self):
        with npt.assert_raises(ValueError):
            corr(self.x, self.y)

    def test_assert_ordination_results_equal(self):
        minimal1 = OrdinationResults([1, 2])

        # a minimal set of results should be equal to itself
        assert_ordination_results_equal(minimal1, minimal1)

        # type mismatch
        with npt.assert_raises(AssertionError):
            assert_ordination_results_equal(minimal1, 'foo')

        # numeric values should be checked that they're almost equal
        almost_minimal1 = OrdinationResults([1.0000001, 1.9999999])
        assert_ordination_results_equal(minimal1, almost_minimal1)

        # species_ids missing in one, present in the other
        almost_minimal1.species_ids = ['abc', 'def']
        with npt.assert_raises(AssertionError):
            assert_ordination_results_equal(minimal1, almost_minimal1)
        almost_minimal1.species_ids = None

        # site_ids missing in one, present in the other
        almost_minimal1.site_ids = ['abc', 'def']
        with npt.assert_raises(AssertionError):
            assert_ordination_results_equal(minimal1, almost_minimal1)
        almost_minimal1.site_ids = None

        # test each of the optional numeric attributes
        for attr in ('species', 'site', 'biplot', 'site_constraints',
                     'proportion_explained'):
            # missing optional numeric attribute in one, present in the other
            setattr(almost_minimal1, attr, [[1, 2], [3, 4]])
            with npt.assert_raises(AssertionError):
                assert_ordination_results_equal(minimal1, almost_minimal1)
            setattr(almost_minimal1, attr, None)

            # optional numeric attributes present in both, but not almost equal
            setattr(minimal1, attr, [[1, 2], [3, 4]])
            setattr(almost_minimal1, attr, [[1, 2], [3.00002, 4]])
            with npt.assert_raises(AssertionError):
                assert_ordination_results_equal(minimal1, almost_minimal1)
            setattr(minimal1, attr, None)
            setattr(almost_minimal1, attr, None)

            # optional numeric attributes present in both, and almost equal
            setattr(minimal1, attr, [[1, 2], [3, 4]])
            setattr(almost_minimal1, attr, [[1, 2], [3.00000002, 4]])
            assert_ordination_results_equal(minimal1, almost_minimal1)
            setattr(minimal1, attr, None)
            setattr(almost_minimal1, attr, None)
class TestCAResults(object):
    """Correspondence Analysis results checked against L&L 1998."""

    def setup(self):
        """Data from table 9.11 in Legendre & Legendre 1998."""
        self.X = np.loadtxt(get_data_path('L&L_CA_data'))
        self.ordination = CA(self.X, ['Site1', 'Site2', 'Site3'],
                             ['Species1', 'Species2', 'Species3'])

    def test_scaling2(self):
        scores = self.ordination.scores(scaling=2)
        # p. 460 L&L 1998
        F_hat = np.array([[0.40887, -0.06955],
                          [-0.11539, 0.29977],
                          [-0.30997, -0.18739]])
        npt.assert_almost_equal(*normalize_signs(F_hat, scores.species),
                                decimal=5)
        V_hat = np.array([[-0.84896, -0.88276],
                          [-0.22046, 1.34482],
                          [1.66697, -0.47032]])
        npt.assert_almost_equal(*normalize_signs(V_hat, scores.site),
                                decimal=5)

    def test_scaling1(self):
        scores = self.ordination.scores(scaling=1)
        # p. 458
        V = np.array([[1.31871, -0.34374],
                      [-0.37215, 1.48150],
                      [-0.99972, -0.92612]])
        npt.assert_almost_equal(*normalize_signs(V, scores.species), decimal=5)
        F = np.array([[-0.26322, -0.17862],
                      [-0.06835, 0.27211],
                      [0.51685, -0.09517]])
        npt.assert_almost_equal(*normalize_signs(F, scores.site), decimal=5)

    def test_maintain_chi_square_distance_scaling1(self):
        """In scaling 1, chi^2 distance among rows (sites) is equal to
        euclidean distance between them in transformed space."""
        frequencies = self.X / self.X.sum()
        chi2_distances = chi_square_distance(frequencies)
        transformed_sites = self.ordination.scores(1).site
        euclidean_distances = pdist(transformed_sites, 'euclidean')
        npt.assert_almost_equal(chi2_distances, euclidean_distances)

    def test_maintain_chi_square_distance_scaling2(self):
        """In scaling 2, chi^2 distance among columns (species) is
        equal to euclidean distance between them in transformed space."""
        frequencies = self.X / self.X.sum()
        chi2_distances = chi_square_distance(frequencies, between_rows=False)
        transformed_species = self.ordination.scores(2).species
        euclidean_distances = pdist(transformed_species, 'euclidean')
        npt.assert_almost_equal(chi2_distances, euclidean_distances)
class TestCAErrors(object):
    """CA input validation."""

    def test_negative(self):
        # CA rejects tables containing negative values.
        X = np.array([[1, 2], [-0.1, -2]])
        with npt.assert_raises(ValueError):
            CA(X, None, None)
class TestRDAErrors(object):
    """RDA input validation."""

    def test_shape(self):
        # Incompatible Y/X dimensions must be rejected.
        # (nose-style generator test: each yield is one test case)
        for n, p, n_, m in [(3, 4, 2, 1), (3, 4, 3, 10)]:
            Y = np.random.randn(n, p)
            X = np.random.randn(n_, m)
            yield npt.assert_raises, ValueError, RDA, Y, X, None, None
class TestRDAResults(object):
    # STATUS: L&L only shows results with scaling 1, and they agree
    # with vegan's (module multiplying by a constant). I can also
    # compute scaling 2, agreeing with vegan, but there are no written
    # results in L&L.
    def setup(self):
        """Data from table 11.3 in Legendre & Legendre 1998."""
        Y = np.loadtxt(get_data_path('example2_Y'))
        X = np.loadtxt(get_data_path('example2_X'))
        self.ordination = RDA(Y, X,
                              ['Site0', 'Site1', 'Site2', 'Site3', 'Site4',
                               'Site5', 'Site6', 'Site7', 'Site8', 'Site9'],
                              ['Species0', 'Species1', 'Species2', 'Species3',
                               'Species4', 'Species5'])

    def test_scaling1(self):
        scores = self.ordination.scores(1)

        # Load data as computed with vegan 2.0-8
        vegan_species = np.loadtxt(get_data_path(
            'example2_species_scaling1_from_vegan'))
        npt.assert_almost_equal(scores.species, vegan_species, decimal=6)

        vegan_site = np.loadtxt(get_data_path(
            'example2_site_scaling1_from_vegan'))
        npt.assert_almost_equal(scores.site, vegan_site, decimal=6)

    def test_scaling2(self):
        scores = self.ordination.scores(2)

        # Load data as computed with vegan 2.0-8
        vegan_species = np.loadtxt(get_data_path(
            'example2_species_scaling2_from_vegan'))
        npt.assert_almost_equal(scores.species, vegan_species, decimal=6)

        vegan_site = np.loadtxt(get_data_path(
            'example2_site_scaling2_from_vegan'))
        npt.assert_almost_equal(scores.site, vegan_site, decimal=6)
class TestCCAErrors(object):
    """CCA input validation."""

    def setup(self):
        """Data from table 11.3 in Legendre & Legendre 1998."""
        self.Y = np.loadtxt(get_data_path('example3_Y'))
        self.X = np.loadtxt(get_data_path('example3_X'))

    def test_shape(self):
        # Row-count mismatch between Y and X must raise.
        X, Y = self.X, self.Y
        with npt.assert_raises(ValueError):
            CCA(Y, X[:-1], None, None)

    def test_Y_values(self):
        X, Y = self.X, self.Y
        # NOTE(review): these assignments mutate self.Y in place; later
        # assertions in this method run against the modified fixture.
        Y[0, 0] = -1
        with npt.assert_raises(ValueError):
            CCA(Y, X, None, None)
        Y[0] = 0
        with npt.assert_raises(ValueError):
            CCA(Y, X, None, None)
class TestCCAResults(object):
    """CCA results checked against vegan 2.0-8."""

    def setup(self):
        """Data from table 11.3 in Legendre & Legendre 1998
        (p. 590). Loaded results as computed with vegan 2.0-8 and
        compared with table 11.5 if also there."""
        Y = np.loadtxt(get_data_path('example3_Y'))
        X = np.loadtxt(get_data_path('example3_X'))
        # Note only X[:, :-1] is passed as constraints here.
        self.ordination = CCA(Y, X[:, :-1],
                              ['Site0', 'Site1', 'Site2', 'Site3', 'Site4',
                               'Site5', 'Site6', 'Site7', 'Site8', 'Site9'],
                              ['Species0', 'Species1', 'Species2', 'Species3',
                               'Species4', 'Species5', 'Species6', 'Species7',
                               'Species8'])

    def test_scaling1_species(self):
        scores = self.ordination.scores(1)
        vegan_species = np.loadtxt(get_data_path(
            'example3_species_scaling1_from_vegan'))
        npt.assert_almost_equal(scores.species, vegan_species, decimal=6)

    def test_scaling1_site(self):
        scores = self.ordination.scores(1)
        vegan_site = np.loadtxt(get_data_path(
            'example3_site_scaling1_from_vegan'))
        npt.assert_almost_equal(scores.site, vegan_site, decimal=4)

    def test_scaling2_species(self):
        scores = self.ordination.scores(2)
        vegan_species = np.loadtxt(get_data_path(
            'example3_species_scaling2_from_vegan'))
        npt.assert_almost_equal(scores.species, vegan_species, decimal=5)

    def test_scaling2_site(self):
        scores = self.ordination.scores(2)
        vegan_site = np.loadtxt(get_data_path(
            'example3_site_scaling2_from_vegan'))
        npt.assert_almost_equal(scores.site, vegan_site, decimal=4)
class TestPCoAResults(object):
    """PCoA on the Krzanowski sample data set."""

    def setup(self):
        """Sample data set from page 111 of W.J Krzanowski. Principles
        of multivariate analysis, 2000, Oxford University Press."""
        matrix = np.loadtxt(get_data_path('PCoA_sample_data'))
        dist_matrix = DistanceMatrix(matrix, map(str, range(matrix.shape[0])))
        self.dist_matrix = dist_matrix

    def test_negative_eigenvalue_warning(self):
        """This data has some small negative eigenvalues."""
        npt.assert_warns(RuntimeWarning, PCoA, self.dist_matrix)

    def test_values(self):
        """Adapted from cogent's `test_principal_coordinate_analysis`:
        "I took the example in the book (see intro info), and did the
        principal coordinates analysis, plotted the data and it looked
        right"."""
        # The negative-eigenvalue warning is expected; silence it here.
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', category=RuntimeWarning)
            ordination = PCoA(self.dist_matrix)
            scores = ordination.scores()

        exp_eigvals = np.array([0.73599103, 0.26260032, 0.14926222, 0.06990457,
                                0.02956972, 0.01931184, 0., 0., 0., 0., 0., 0.,
                                0., 0.])
        exp_site = np.loadtxt(get_data_path('exp_PCoAzeros_site'))
        exp_prop_expl = np.array([0.58105792, 0.20732046, 0.1178411,
                                  0.05518899, 0.02334502, 0.01524651, 0., 0.,
                                  0., 0., 0., 0., 0., 0.])
        exp_site_ids = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
                        '10', '11', '12', '13']
        # Note the absolute value because column can have signs swapped
        npt.assert_almost_equal(scores.eigvals, exp_eigvals)
        npt.assert_almost_equal(np.abs(scores.site), exp_site)
        npt.assert_almost_equal(scores.proportion_explained, exp_prop_expl)
        npt.assert_equal(scores.site_ids, exp_site_ids)
class TestPCoAResultsExtensive(object):
    """PCoA checked against precomputed eigenvalues/coordinates."""

    def setup(self):
        matrix = np.loadtxt(get_data_path('PCoA_sample_data_2'))
        self.ids = [str(i) for i in range(matrix.shape[0])]
        dist_matrix = DistanceMatrix(matrix, self.ids)
        self.ordination = PCoA(dist_matrix)

    def test_values(self):
        results = self.ordination.scores()

        npt.assert_equal(len(results.eigvals), len(results.site[0]))

        expected = np.array([[-0.028597, 0.22903853, 0.07055272,
                              0.26163576, 0.28398669, 0.0],
                             [0.37494056, 0.22334055, -0.20892914,
                              0.05057395, -0.18710366, 0.0],
                             [-0.33517593, -0.23855979, -0.3099887,
                              0.11521787, -0.05021553, 0.0],
                             [0.25412394, -0.4123464, 0.23343642,
                              0.06403168, -0.00482608, 0.0],
                             [-0.28256844, 0.18606911, 0.28875631,
                              -0.06455635, -0.21141632, 0.0],
                             [0.01727687, 0.012458, -0.07382761,
                              -0.42690292, 0.1695749, 0.0]])
        # Signs per column are arbitrary, hence normalize_signs.
        npt.assert_almost_equal(*normalize_signs(expected, results.site))

        expected = np.array([0.3984635, 0.36405689, 0.28804535, 0.27479983,
                             0.19165361, 0.0])
        npt.assert_almost_equal(results.eigvals, expected)

        expected = np.array([0.2626621381, 0.2399817314, 0.1898758748,
                             0.1811445992, 0.1263356565, 0.0])
        npt.assert_almost_equal(results.proportion_explained, expected)

        npt.assert_equal(results.site_ids, self.ids)
class TestPCoAEigenResults(object):
    """PCoA value checks against a nine-sample distance matrix read
    from a data file."""

    def setup(self):
        dist_matrix = DistanceMatrix.read(get_data_path('PCoA_sample_data_3'))
        self.ordination = PCoA(dist_matrix)
        self.ids = ['PC.636', 'PC.635', 'PC.356', 'PC.481', 'PC.354', 'PC.593',
                    'PC.355', 'PC.607', 'PC.634']

    def test_values(self):
        results = self.ordination.scores()

        # one eigenvalue per coordinate axis
        npt.assert_almost_equal(len(results.eigvals), len(results.site[0]))

        expected = np.loadtxt(get_data_path('exp_PCoAEigenResults_site'))
        # normalize_signs compensates for the sign indeterminacy of axes
        npt.assert_almost_equal(*normalize_signs(expected, results.site))

        expected = np.array([0.51236726, 0.30071909, 0.26791207, 0.20898868,
                             0.19169895, 0.16054235, 0.15017696, 0.12245775,
                             0.0])
        npt.assert_almost_equal(results.eigvals, expected)

        expected = np.array([0.2675738328, 0.157044696, 0.1399118638,
                             0.1091402725, 0.1001110485, 0.0838401162,
                             0.0784269939, 0.0639511764, 0.0])
        npt.assert_almost_equal(results.proportion_explained, expected)

        npt.assert_equal(results.site_ids, self.ids)
class TestPCoAPrivateMethods(object):
    """Unit checks for PCoA's static intermediate-matrix helpers."""

    def setup(self):
        # a 2x3 matrix of 1..6 and a 3x3 matrix of 1..9
        self.matrix = np.arange(1, 7).reshape(2, 3)
        self.matrix2 = np.arange(1, 10).reshape(3, 3)

    def test_E_matrix(self):
        observed = PCoA._E_matrix(self.matrix)
        reference = np.array([[-0.5, -2., -4.5],
                              [-8., -12.5, -18.]])
        npt.assert_almost_equal(observed, reference)

    def test_F_matrix(self):
        observed = PCoA._F_matrix(self.matrix2)
        # Note that `test_make_F_matrix` in cogent is wrong
        reference = np.zeros((3, 3))
        npt.assert_almost_equal(observed, reference)
class TestPCoAErrors(object):
    """Input-validation checks for the PCoA constructor."""

    def test_input(self):
        # a plain nested list is not a DistanceMatrix and must be rejected
        bad_input = [[1, 2], [3, 4]]
        with npt.assert_raises(TypeError):
            PCoA(bad_input)
class TestOrdinationResults(unittest.TestCase):
    """String representation, plotting, and rich-display tests for
    OrdinationResults."""

    def setUp(self):
        # Define in-memory CA results to serialize and deserialize.
        eigvals = np.array([0.0961330159181, 0.0409418140138])
        species = np.array([[0.408869425742, 0.0695518116298],
                            [-0.1153860437, -0.299767683538],
                            [-0.309967102571, 0.187391917117]])
        site = np.array([[-0.848956053187, 0.882764759014],
                         [-0.220458650578, -1.34482000302],
                         [1.66697179591, 0.470324389808]])
        biplot = None
        site_constraints = None
        prop_explained = None
        species_ids = ['Species1', 'Species2', 'Species3']
        site_ids = ['Site1', 'Site2', 'Site3']
        self.ordination_results = OrdinationResults(
            eigvals=eigvals, species=species, site=site, biplot=biplot,
            site_constraints=site_constraints,
            proportion_explained=prop_explained, species_ids=species_ids,
            site_ids=site_ids)

        # DataFrame for testing plot method. Has a categorical column with a
        # mix of numbers and strings. Has a numeric column with a mix of ints,
        # floats, and strings that can be converted to floats. Has a numeric
        # column with missing data (np.nan).
        self.df = pd.DataFrame([['foo', '42', 10],
                                [22, 0, 8],
                                [22, -4.2, np.nan],
                                ['foo', '42.19', 11]],
                               index=['A', 'B', 'C', 'D'],
                               columns=['categorical', 'numeric', 'nancolumn'])

        # Minimal ordination results for easier testing of plotting method.
        # Paired with df above.
        eigvals = np.array([0.50, 0.25, 0.25])
        site = np.array([[0.1, 0.2, 0.3],
                         [0.2, 0.3, 0.4],
                         [0.3, 0.4, 0.5],
                         [0.4, 0.5, 0.6]])
        self.min_ord_results = OrdinationResults(eigvals=eigvals, site=site,
                                                 site_ids=['A', 'B', 'C', 'D'])

    def test_str(self):
        # full results: every attribute summary is populated
        exp = ("Ordination results:\n"
               "\tEigvals: 2\n"
               "\tProportion explained: N/A\n"
               "\tSpecies: 3x2\n"
               "\tSite: 3x2\n"
               "\tBiplot: N/A\n"
               "\tSite constraints: N/A\n"
               "\tSpecies IDs: 'Species1', 'Species2', 'Species3'\n"
               "\tSite IDs: 'Site1', 'Site2', 'Site3'")
        obs = str(self.ordination_results)
        self.assertEqual(obs, exp)

        # all optional attributes missing
        exp = ("Ordination results:\n"
               "\tEigvals: 1\n"
               "\tProportion explained: N/A\n"
               "\tSpecies: N/A\n"
               "\tSite: N/A\n"
               "\tBiplot: N/A\n"
               "\tSite constraints: N/A\n"
               "\tSpecies IDs: N/A\n"
               "\tSite IDs: N/A")
        obs = str(OrdinationResults(np.array([4.2])))
        self.assertEqual(obs, exp)

    def check_basic_figure_sanity(self, fig, exp_num_subplots, exp_title,
                                  exp_legend_exists, exp_xlabel, exp_ylabel,
                                  exp_zlabel):
        # Shared helper asserting structural properties of a figure
        # produced by OrdinationResults.plot.
        # check type
        assert_is_instance(fig, mpl.figure.Figure)

        # check number of subplots
        axes = fig.get_axes()
        npt.assert_equal(len(axes), exp_num_subplots)

        # check title
        ax = axes[0]
        npt.assert_equal(ax.get_title(), exp_title)

        # shouldn't have tick labels
        for tick_label in (ax.get_xticklabels() + ax.get_yticklabels() +
                           ax.get_zticklabels()):
            npt.assert_equal(tick_label.get_text(), '')

        # check if legend is present
        legend = ax.get_legend()
        if exp_legend_exists:
            assert_true(legend is not None)
        else:
            assert_true(legend is None)

        # check axis labels
        npt.assert_equal(ax.get_xlabel(), exp_xlabel)
        npt.assert_equal(ax.get_ylabel(), exp_ylabel)
        npt.assert_equal(ax.get_zlabel(), exp_zlabel)

    def test_plot_no_metadata(self):
        # default labels are the axis indices; no colorbar subplot
        fig = self.min_ord_results.plot()
        self.check_basic_figure_sanity(fig, 1, '', False, '0', '1', '2')

    def test_plot_with_numeric_metadata_and_plot_options(self):
        # numeric coloring adds a colorbar (second subplot), no legend
        fig = self.min_ord_results.plot(
            self.df, 'numeric', axes=(1, 0, 2),
            axis_labels=['PC 2', 'PC 1', 'PC 3'], title='a title', cmap='Reds')
        self.check_basic_figure_sanity(
            fig, 2, 'a title', False, 'PC 2', 'PC 1', 'PC 3')

    def test_plot_with_categorical_metadata_and_plot_options(self):
        # categorical coloring adds a legend instead of a colorbar
        fig = self.min_ord_results.plot(
            self.df, 'categorical', axes=[2, 0, 1], title='a title',
            cmap='Accent')
        self.check_basic_figure_sanity(fig, 1, 'a title', True, '2', '0', '1')

    def test_plot_with_invalid_axis_labels(self):
        # four labels supplied for a three-axis plot
        with six.assertRaisesRegex(self, ValueError, 'axis_labels.*4'):
            self.min_ord_results.plot(axes=[2, 0, 1],
                                      axis_labels=('a', 'b', 'c', 'd'))

    def test_validate_plot_axes_valid_input(self):
        # shouldn't raise an error on valid input. nothing is returned, so
        # nothing to check here
        self.min_ord_results._validate_plot_axes(self.min_ord_results.site.T,
                                                 (1, 2, 0))

    def test_validate_plot_axes_invalid_input(self):
        # not enough dimensions
        with six.assertRaisesRegex(self, ValueError, '2 dimension\(s\)'):
            self.min_ord_results._validate_plot_axes(
                np.asarray([[0.1, 0.2, 0.3], [0.2, 0.3, 0.4]]), (0, 1, 2))

        coord_matrix = self.min_ord_results.site.T

        # wrong number of axes
        with six.assertRaisesRegex(self, ValueError, 'exactly three.*found 0'):
            self.min_ord_results._validate_plot_axes(coord_matrix, [])
        with six.assertRaisesRegex(self, ValueError, 'exactly three.*found 4'):
            self.min_ord_results._validate_plot_axes(coord_matrix,
                                                     (0, 1, 2, 3))

        # duplicate axes
        with six.assertRaisesRegex(self, ValueError, 'must be unique'):
            self.min_ord_results._validate_plot_axes(coord_matrix, (0, 1, 0))

        # out of range axes
        with six.assertRaisesRegex(self, ValueError, 'axes\[1\].*3'):
            self.min_ord_results._validate_plot_axes(coord_matrix, (0, -1, 2))
        with six.assertRaisesRegex(self, ValueError, 'axes\[2\].*3'):
            self.min_ord_results._validate_plot_axes(coord_matrix, (0, 2, 3))

    def test_get_plot_point_colors_invalid_input(self):
        # column provided without df
        with npt.assert_raises(ValueError):
            self.min_ord_results._get_plot_point_colors(None, 'numeric',
                                                        ['B', 'C'], 'jet')

        # df provided without column
        with npt.assert_raises(ValueError):
            self.min_ord_results._get_plot_point_colors(self.df, None,
                                                        ['B', 'C'], 'jet')

        # column not in df
        with six.assertRaisesRegex(self, ValueError, 'missingcol'):
            self.min_ord_results._get_plot_point_colors(self.df, 'missingcol',
                                                        ['B', 'C'], 'jet')

        # id not in df
        with six.assertRaisesRegex(self, ValueError, 'numeric'):
            self.min_ord_results._get_plot_point_colors(
                self.df, 'numeric', ['B', 'C', 'missingid', 'A'], 'jet')

        # missing data in df
        with six.assertRaisesRegex(self, ValueError, 'nancolumn'):
            self.min_ord_results._get_plot_point_colors(self.df, 'nancolumn',
                                                        ['B', 'C', 'A'], 'jet')

    def test_get_plot_point_colors_no_df_or_column(self):
        # no metadata at all: colors and color dict are both None
        obs = self.min_ord_results._get_plot_point_colors(None, None,
                                                          ['B', 'C'], 'jet')
        npt.assert_equal(obs, (None, None))

    def test_get_plot_point_colors_numeric_column(self):
        # subset of the ids in df
        exp = [0.0, -4.2, 42.0]
        obs = self.min_ord_results._get_plot_point_colors(
            self.df, 'numeric', ['B', 'C', 'A'], 'jet')
        npt.assert_almost_equal(obs[0], exp)
        assert_true(obs[1] is None)

        # all ids in df
        exp = [0.0, 42.0, 42.19, -4.2]
        obs = self.min_ord_results._get_plot_point_colors(
            self.df, 'numeric', ['B', 'A', 'D', 'C'], 'jet')
        npt.assert_almost_equal(obs[0], exp)
        assert_true(obs[1] is None)

    def test_get_plot_point_colors_categorical_column(self):
        # subset of the ids in df
        exp_colors = [[0., 0., 0.5, 1.], [0., 0., 0.5, 1.], [0.5, 0., 0., 1.]]
        exp_color_dict = {
            'foo': [0.5, 0., 0., 1.],
            22: [0., 0., 0.5, 1.]
        }
        obs = self.min_ord_results._get_plot_point_colors(
            self.df, 'categorical', ['B', 'C', 'A'], 'jet')
        npt.assert_almost_equal(obs[0], exp_colors)
        npt.assert_equal(obs[1], exp_color_dict)

        # all ids in df
        exp_colors = [[0., 0., 0.5, 1.], [0.5, 0., 0., 1.], [0.5, 0., 0., 1.],
                      [0., 0., 0.5, 1.]]
        obs = self.min_ord_results._get_plot_point_colors(
            self.df, 'categorical', ['B', 'A', 'D', 'C'], 'jet')
        npt.assert_almost_equal(obs[0], exp_colors)
        # should get same color dict as before
        npt.assert_equal(obs[1], exp_color_dict)

    def test_plot_categorical_legend(self):
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')

        # we shouldn't have a legend yet
        assert_true(ax.get_legend() is None)

        self.min_ord_results._plot_categorical_legend(
            ax, {'foo': 'red', 'bar': 'green'})

        # make sure we have a legend now
        legend = ax.get_legend()
        assert_true(legend is not None)

        # do some light sanity checking to make sure our input labels and
        # colors are present. we're not using nose.tools.assert_items_equal
        # because it isn't available in Python 3.
        labels = [t.get_text() for t in legend.get_texts()]
        npt.assert_equal(sorted(labels), ['bar', 'foo'])
        colors = [l.get_color() for l in legend.get_lines()]
        npt.assert_equal(sorted(colors), ['green', 'red'])

    def test_repr_png(self):
        obs = self.min_ord_results._repr_png_()
        assert_is_instance(obs, binary_type)
        assert_true(len(obs) > 0)

    def test_repr_svg(self):
        obs = self.min_ord_results._repr_svg_()
        # print_figure(format='svg') can return text or bytes depending on the
        # version of IPython
        assert_true(isinstance(obs, text_type) or isinstance(obs, binary_type))
        assert_true(len(obs) > 0)

    def test_png(self):
        assert_is_instance(self.min_ord_results.png, Image)

    def test_svg(self):
        assert_is_instance(self.min_ord_results.svg, SVG)
if __name__ == '__main__':
    # Run this module's tests through nose when executed directly.
    import nose
    nose.runmodule()
| bsd-3-clause |
psi-rking/psi4 | psi4/driver/qcdb/mpl.py | 7 | 54234 | #
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with matplotlib plotting routines. These are not hooked up to
any particular qcdb data structures but can be called with basic
arguments.
"""
import os
#import matplotlib
#matplotlib.use('Agg')
def expand_saveas(saveas, def_filename, def_path=os.path.abspath(os.curdir), def_prefix='', relpath=False):
    """Build a save path (without extension) from the user directive *saveas*.

    *saveas* may carry a directory, a filename, both, or neither (a
    trailing '/' means directory only; '.' is expanded). Whatever part
    is missing is filled in from *def_path* and/or
    *def_prefix* + *def_filename* (spaces in the filename become
    underscores; *def_prefix* tags the figure type, e.g. ``mplthread_``).
    Returns an absolute path, or a path relative to the current working
    directory when *relpath* is true.
    """
    fallback_name = def_prefix + def_filename.replace(' ', '_')

    if saveas is None:
        directory, filename = def_path, fallback_name
    else:
        directory, filename = os.path.split(saveas)
        if not directory:
            directory = def_path
        if not filename:
            filename = fallback_name

    target = os.path.join(os.path.abspath(directory), filename)
    return os.path.relpath(target, os.getcwd()) if relpath else target
def segment_color(argcolor, saptcolor):
    """Find appropriate color expression between overall color directive
    *argcolor* and particular color availability *saptcolor*.

    Parameters
    ----------
    argcolor : str or None
        Overall directive: None or 'sapt' selects continuous colormap
        coloring from *saptcolor* when available; 'rgb' selects the
        three-bin HB/MX/DD coloring; any other string is used directly
        as a matplotlib color name.
    saptcolor : float or None
        Per-reaction color value expected in [0, 1]; values outside
        that range are treated as unavailable.

    Returns
    -------
    Matplotlib color specification (name string or RGBA tuple).
    """
    # validate any sapt color; out-of-range values mean "no sapt color"
    if saptcolor is not None:
        if saptcolor < 0.0 or saptcolor > 1.0:
            saptcolor = None

    if argcolor is None or argcolor == 'sapt':
        # sapt color from rxn if available, otherwise grey.
        # BUG FIX: the original referenced an undefined name `rxncolor`
        # in the argcolor-is-None branch, raising NameError; both
        # branches reduce to the same reachable behavior, merged here.
        if saptcolor is not None:
            # deferred import: only colormap coloring needs matplotlib
            import matplotlib
            clr = matplotlib.cm.jet(saptcolor)
        else:
            clr = 'grey'
    elif argcolor == 'rgb':
        # HB/MX/DD sapt color from rxn if available
        if saptcolor is not None:
            if saptcolor < 0.333:
                clr = 'blue'
            elif saptcolor < 0.667:
                clr = 'green'
            else:
                clr = 'red'
        else:
            clr = 'grey'
    else:
        # color argument is name of mpl color
        clr = argcolor
    return clr
def bars(data, title='', saveas=None, relpath=False, graphicsformat=['pdf'], view=True):
    """Draw a fixed-scale 'gray-bars' comparison of error statistics.

    Each non-None entry of *data* is a dict carrying a four-value
    'data' list (total, HB, MX, DD statistics) and an 'mc'
    model-chemistry label; every entry becomes one grey/red/green/blue
    bar cluster labeled with its 'mc' key. The plot is titled *title*,
    saved in each format of *graphicsformat* (filename derived from
    *saveas*, made relative to cwd if *relpath*), and shown on screen
    when *view*. Returns a dict mapping lowercase extension to the
    saved file path.
    """
    import hashlib
    import matplotlib.pyplot as plt

    # fixed dimensions for consistent Illustrator import
    fig, ax = plt.subplots(figsize=(12, 7))
    plt.ylim([0, 4.86])
    plt.xlim([0, 6])
    plt.xticks([])

    # plot title in the upper-left corner
    ax.text(0.4, 4.6, title,
            verticalalignment='bottom', horizontalalignment='left',
            family='Times New Roman', weight='bold', fontsize=12)

    cluster_widths = [0.15, 0.02, 0.02, 0.02]  # TT, HB, MX, DD
    cluster_shades = ['grey', 'red', 'green', 'blue']
    posn = 0.1  # running position along the x-axis

    # one bar cluster per model chemistry
    for entry in data:
        if entry is not None:
            lefts = [posn, posn + 0.025, posn + 0.065, posn + 0.105]
            patches = ax.bar(lefts, entry['data'], cluster_widths, linewidth=0)
            for patch, shade in zip(patches, cluster_shades):
                patch.set_color(shade)
            ax.text(posn + .08, 4.3, entry['mc'],
                    verticalalignment='center', horizontalalignment='right',
                    rotation='vertical',
                    family='Times New Roman', fontsize=8)
            posn += 0.20

    # unique-ish filename from the title plus participating labels
    mc_tag = repr([entry['mc'] for entry in data if entry is not None])
    pltuid = title + '_' + hashlib.sha1((title + mc_tag).encode()).hexdigest()
    pltfile = expand_saveas(saveas, pltuid, def_prefix='bar_', relpath=relpath)

    # save in each requested format, then optionally display
    files_saved = {}
    for ext in graphicsformat:
        target = pltfile + '.' + ext.lower()
        plt.savefig(target, transparent=True, format=ext, bbox_inches='tight')
        files_saved[ext.lower()] = target
    if view:
        plt.show()
    plt.close()
    return files_saved
def flat(data, color=None, title='', xlimit=4.0, xlines=[0.0, 0.3, 1.0], mae=None, mape=None, view=True,
         saveas=None, relpath=False, graphicsformat=['pdf']):
    """Draw a single-tier slat diagram of per-reaction errors.

    Each entry of *data* is a dict with a one-item 'data' list and an
    optional 'color' key. The x-range spans *xlimit* on either side of
    zero; grey guide lines are drawn symmetrically at each value in
    *xlines* (zero drawn once). Slat colors follow *color* via
    segment_color. Statistic *mae* is marked on the overbound side and
    *mape* (scaled) on the underbound side. The figure is written in
    each format of *graphicsformat* (name from *saveas*/*title*, made
    relative if *relpath*) and shown when *view*; returns a dict of
    extension to saved path.
    """
    import matplotlib.pyplot as plt

    tier_count = 1
    slat_posns = range(-1, -1 * tier_count - 1, -1)

    # very short strip figure, all axis decoration suppressed
    fig, ax = plt.subplots(figsize=(12, 0.33))
    plt.xlim([-xlimit, xlimit])
    plt.ylim([-1 * tier_count - 1, 0])
    plt.yticks([])
    plt.xticks([])
    ax.axis('off')

    # symmetric grey guide lines about zero
    for guide in xlines:
        plt.axvline(guide, color='grey', linewidth=4)
        if guide != 0.0:
            plt.axvline(-1 * guide, color='grey', linewidth=4)

    # one vertical tick mark per reaction error
    for rxn in data:
        clr = segment_color(color, rxn['color'] if 'color' in rxn else None)
        ax.plot(rxn['data'], slat_posns, '|', color=clr, markersize=13.0, mew=4)

    # summary statistics: MAE bar left of zero, scaled MA%E dot right
    if mae is not None:
        plt.axvline(-1 * mae, color='black', linewidth=12)
    if mape is not None:  # equivalent to MAE for a 10 kcal/mol interaction energy
        ax.plot(0.025 * mape, slat_posns, 'o', color='black', markersize=15.0)

    # simple (not really unique) filename for LaTeX integration
    pltfile = expand_saveas(saveas, title, def_prefix='flat_', relpath=relpath)
    files_saved = {}
    for ext in graphicsformat:
        target = pltfile + '.' + ext.lower()
        plt.savefig(target, transparent=True, format=ext, bbox_inches='tight',
                    frameon=False, pad_inches=0.0)
        files_saved[ext.lower()] = target
    if view:
        plt.show()
    plt.close()
    return files_saved
#def mpl_distslat_multiplot_files(pltfile, dbid, dbname, xmin, xmax, mcdats, labels, titles):
# """Saves a plot with basename *pltfile* with a slat representation
# of the modelchems errors in *mcdat*. Plot is in PNG, PDF, & EPS
# and suitable for download, no mouseover properties. Both labeled
# and labelless (for pub) figures are constructed.
#
# """
# import matplotlib as mpl
# from matplotlib.axes import Subplot
# import sapt_colors
# from matplotlib.figure import Figure
#
# nplots = len(mcdats)
# fht = nplots * 0.8
# fig, axt = plt.subplots(figsize=(12.0, fht))
# plt.subplots_adjust(left=0.01, right=0.99, hspace=0.3)
#
# axt.set_xticks([])
# axt.set_yticks([])
# plt.axis('off')
#
# for item in range(nplots):
# mcdat = mcdats[item]
# label = labels[item]
# title = titles[item]
#
# erdat = np.array(mcdat)
# yvals = np.ones(len(mcdat))
# y = np.array([sapt_colors.sapt_colors[dbname][i] for i in label])
#
# ax = Subplot(fig, nplots, 1, item + 1)
# fig.add_subplot(ax)
# sc = ax.scatter(erdat, yvals, c=y, s=3000, marker="|", cmap=mpl.cm.jet, vmin=0, vmax=1)
#
# ax.set_yticks([])
# ax.set_xticks([])
# ax.set_frame_on(False)
# ax.set_xlim([xmin, xmax])
#
# # Write files with only slats
# plt.savefig('scratch/' + pltfile + '_plain' + '.png', transparent=True, format='PNG')
# plt.savefig('scratch/' + pltfile + '_plain' + '.pdf', transparent=True, format='PDF')
# plt.savefig('scratch/' + pltfile + '_plain' + '.eps', transparent=True, format='EPS')
#
# # Rewrite files with guides and labels
# for item in range(nplots):
# ax_again = fig.add_subplot(nplots, 1, item + 1)
# ax_again.set_title(titles[item], fontsize=8)
# ax_again.text(xmin + 0.3, 1.0, stats(np.array(mcdats[item])), fontsize=7, family='monospace', verticalalignment='center')
# ax_again.plot([0, 0], [0.9, 1.1], color='#cccc00', lw=2)
# ax_again.set_frame_on(False)
# ax_again.set_yticks([])
# ax_again.set_xticks([-12.0, -8.0, -4.0, -2.0, -1.0, 0.0, 1.0, 2.0, 4.0, 8.0, 12.0])
# ax_again.tick_params(axis='both', which='major', labelbottom='off', bottom='off')
# ax_again.set_xticks([-12.0, -8.0, -4.0, -2.0, -1.0, 0.0, 1.0, 2.0, 4.0, 8.0, 12.0])
# ax_again.tick_params(axis='both', which='major', labelbottom='on', bottom='off')
#
# plt.savefig('scratch/' + pltfile + '_trimd' + '.png', transparent=True, format='PNG')
# plt.savefig('scratch/' + pltfile + '_trimd' + '.pdf', transparent=True, format='PDF')
# plt.savefig('scratch/' + pltfile + '_trimd' + '.eps', transparent=True, format='EPS')
def valerr(data, color=None, title='', xtitle='', view=True,
           saveas=None, relpath=False, graphicsformat=['pdf']):
    """Plot stacked panels of reaction energies (top) and energy errors
    (bottom) for each trace in dict *data*, whose values are lists of
    reaction dicts bearing 'axis', 'mcdata', 'bmdata', 'error', and
    optionally 'color' keys. Model-chemistry values appear as colored
    triangles, benchmark values as black circles, and errors as squares;
    each trace is also connected by a grey line in a cycling line style.
    Panels share the x-axis (labeled *xtitle*); the figure carries
    *title*, is saved in each format of *graphicsformat* (path from
    *saveas*, relative if *relpath*), and shown when *view*. Returns a
    dict mapping lowercase extension to saved filename.
    """
    import hashlib
    from itertools import cycle
    import matplotlib.pyplot as plt

    # two stacked panels sharing the x-axis; zero-lines for reference
    fig, ax = plt.subplots(figsize=(4, 6))
    ax1 = fig.add_subplot(211)
    plt.axhline(0.0, axes=ax1, color='black')
    ax1.set_ylabel('Reaction Energy')
    plt.title(title)
    ax2 = plt.subplot(212, sharex=ax1)
    plt.axhline(0.0, axes=ax2, color='#cccc00')
    ax2.set_ylabel('Energy Error')
    ax2.set_xlabel(xtitle)

    # running extents of the plotted data, used for axis limits below
    xmin = 500.0
    xmax = -500.0
    vmin = 1.0
    vmax = -1.0
    emin = 1.0
    emax = -1.0
    linecycler = cycle(['-', '--', '-.', ':'])

    # plot reaction errors and threads
    for trace, tracedata in data.items():
        vaxis = []
        vmcdata = []
        verror = []
        for rxn in tracedata:
            clr = segment_color(color, rxn['color'] if 'color' in rxn else None)
            xmin = min(xmin, rxn['axis'])
            xmax = max(xmax, rxn['axis'])
            # model-chemistry value as a colored triangle (front layer)
            ax1.plot(rxn['axis'], rxn['mcdata'], '^', color=clr, markersize=6.0, mew=0, zorder=10)
            vmcdata.append(rxn['mcdata'])
            vaxis.append(rxn['axis'])
            vmin = min(0, vmin, rxn['mcdata'])
            vmax = max(0, vmax, rxn['mcdata'])
            if rxn['bmdata'] is not None:
                # benchmark value as a black circle behind the mc marker
                ax1.plot(rxn['axis'], rxn['bmdata'], 'o', color='black', markersize=6.0, zorder=1)
                vmin = min(0, vmin, rxn['bmdata'])
                vmax = max(0, vmax, rxn['bmdata'])
            if rxn['error'][0] is not None:
                # first error component as a colored square on the lower panel
                ax2.plot(rxn['axis'], rxn['error'][0], 's', color=clr, mew=0, zorder=8)
                emin = min(0, emin, rxn['error'][0])
                emax = max(0, emax, rxn['error'][0])
                verror.append(rxn['error'][0])
        # connect this trace's points with the next cycled line style
        ls = next(linecycler)
        ax1.plot(vaxis, vmcdata, ls, color='grey', label=trace, zorder=3)
        ax2.plot(vaxis, verror, ls, color='grey', label=trace, zorder=4)

    # pad the axis limits slightly (~2%) beyond the observed extents
    xbuf = max(0.05, abs(0.02 * xmax))
    vbuf = max(0.1, abs(0.02 * vmax))
    ebuf = max(0.01, abs(0.02 * emax))
    plt.xlim([xmin - xbuf, xmax + xbuf])
    ax1.set_ylim([vmin - vbuf, vmax + vbuf])
    plt.legend(fontsize='x-small', frameon=False)
    ax2.set_ylim([emin - ebuf, emax + ebuf])

    # save and show
    pltuid = title + '_' + hashlib.sha1(title.encode()).hexdigest()
    pltfile = expand_saveas(saveas, pltuid, def_prefix='valerr_', relpath=relpath)
    files_saved = {}
    for ext in graphicsformat:
        savefile = pltfile + '.' + ext.lower()
        plt.savefig(savefile, transparent=True, format=ext, bbox_inches='tight')
        files_saved[ext.lower()] = savefile
    if view:
        plt.show()
    plt.close()  # give this a try
    return files_saved
def disthist(data, title='', xtitle='', xmin=None, xmax=None,
             me=None, stde=None, view=True,
             saveas=None, relpath=False, graphicsformat=['pdf']):
    """Plot a histogram of the reaction errors in *data* overlaid with a
    gaussian of mean *me* and standard deviation *stde* (both computed
    from *data* when not given). The x-range runs *xmin* to *xmax*
    (default: symmetric about zero, four standard deviations out), the
    x-axis is labeled *xtitle* and the figure titled *title*. Saved in
    each format of *graphicsformat* (path built from *saveas*, relative
    if *relpath*) and shown when *view*; returns a dict mapping
    lowercase extension to saved filename.
    """
    import hashlib
    import numpy as np
    import matplotlib.pyplot as plt

    def _normal_pdf(mean, var, x):
        """Probability density of a normal(mean, var) distribution at x."""
        return 1.0 / np.sqrt(2.0 * np.pi * var) * np.exp(-pow(x - mean, 2) / 2.0 / var)

    # fill in distribution parameters and a symmetric default x-range
    if me is None:
        me = np.mean(data)
    if stde is None:
        stde = np.std(data, ddof=1)
    evenerr = max(abs(me - 4.0 * stde), abs(me + 4.0 * stde))
    if xmin is None:
        xmin = -1 * evenerr
    if xmax is None:
        xmax = evenerr

    # sample the gaussian at 41 evenly spaced points across the range
    dx = (xmax - xmin) / 40.
    nx = int(round((xmax - xmin) / dx)) + 1
    pdfx = [xmin + i * dx for i in range(nx)]
    pdfy = [_normal_pdf(me, pow(stde, 2), pt) for pt in pdfx]

    # histogram on the left axis, gaussian density on a twin right axis
    fig, ax1 = plt.subplots(figsize=(16, 6))
    plt.axvline(0.0, color='#cccc00')
    ax1.set_xlim(xmin, xmax)
    ax1.hist(data, bins=30, range=(xmin, xmax), color='#2d4065', alpha=0.7)
    ax1.set_xlabel(xtitle)
    ax1.set_ylabel('Count')
    ax2 = ax1.twinx()
    ax2.fill(pdfx, pdfy, color='k', alpha=0.2)
    ax2.set_ylabel('Probability Density')
    plt.title(title)

    # save and show
    pltuid = title + '_' + hashlib.sha1((title + str(me) + str(stde) + str(xmin) + str(xmax)).encode()).hexdigest()
    pltfile = expand_saveas(saveas, pltuid, def_prefix='disthist_', relpath=relpath)
    files_saved = {}
    for ext in graphicsformat:
        target = pltfile + '.' + ext.lower()
        plt.savefig(target, transparent=True, format=ext, bbox_inches='tight')
        files_saved[ext.lower()] = target
    if view:
        plt.show()
    plt.close()
    return files_saved
#def thread(data, labels, color=None, title='', xlimit=4.0, mae=None, mape=None):
# """Generates a tiered slat diagram between model chemistries with
# errors (or simply values) in list *data*, which is supplied as part of the
# dictionary for each participating reaction, along with *dbse* and *rxn* keys
# in argument *data*. The plot is labeled with *title* and each tier with
# an element of *labels* and plotted at *xlimit* from the zero-line. If
# *color* is None, slats are black, if 'sapt', colors are taken from *color*
# key in *data* [0, 1]. Summary statistics *mae* are plotted on the
# overbound side and relative statistics *mape* on the underbound side.
#
# """
# from random import random
# import matplotlib.pyplot as plt
#
# # initialize tiers/wefts
# Nweft = len(labels)
# lenS = 0.2
# gapT = 0.04
# positions = range(-1, -1 * Nweft - 1, -1)
# posnS = []
# for weft in range(Nweft):
# posnS.extend([positions[weft] + lenS, positions[weft] - lenS, None])
# posnT = []
# for weft in range(Nweft - 1):
# posnT.extend([positions[weft] - lenS - gapT, positions[weft + 1] + lenS + gapT, None])
#
# # initialize plot
# fht = Nweft * 0.8
# fig, ax = plt.subplots(figsize=(12, fht))
# plt.subplots_adjust(left=0.01, right=0.99, hspace=0.3)
# plt.xlim([-xlimit, xlimit])
# plt.ylim([-1 * Nweft - 1, 0])
# plt.yticks([])
#
# # label plot and tiers
# ax.text(-0.9 * xlimit, -0.25, title,
# verticalalignment='bottom', horizontalalignment='left',
# family='Times New Roman', weight='bold', fontsize=12)
# for weft in labels:
# ax.text(-0.9 * xlimit, -(1.2 + labels.index(weft)), weft,
# verticalalignment='bottom', horizontalalignment='left',
# family='Times New Roman', weight='bold', fontsize=18)
#
# # plot reaction errors and threads
# for rxn in data:
#
# # preparation
# xvals = rxn['data']
# clr = segment_color(color, rxn['color'] if 'color' in rxn else None)
# slat = []
# for weft in range(Nweft):
# slat.extend([xvals[weft], xvals[weft], None])
# thread = []
# for weft in range(Nweft - 1):
# thread.extend([xvals[weft], xvals[weft + 1], None])
#
# # plotting
# ax.plot(slat, posnS, color=clr, linewidth=1.0, solid_capstyle='round')
# ax.plot(thread, posnT, color=clr, linewidth=0.5, solid_capstyle='round',
# alpha=0.3)
#
# # labeling
# try:
# toplblposn = next(item for item in xvals if item is not None)
# botlblposn = next(item for item in reversed(xvals) if item is not None)
# except StopIteration:
# pass
# else:
# ax.text(toplblposn, -0.75 + 0.6 * random(), rxn['sys'],
# verticalalignment='bottom', horizontalalignment='center',
# family='Times New Roman', fontsize=8)
# ax.text(botlblposn, -1 * Nweft - 0.75 + 0.6 * random(), rxn['sys'],
# verticalalignment='bottom', horizontalalignment='center',
# family='Times New Roman', fontsize=8)
#
# # plot trimmings
# if mae is not None:
# ax.plot([-x for x in mae], positions, 's', color='black')
# if mape is not None: # equivalent to MAE for a 10 kcal/mol IE
# ax.plot([0.025 * x for x in mape], positions, 'o', color='black')
#
# plt.axvline(0, color='black')
# plt.show()
def threads(data, labels, color=None, title='', xlimit=4.0, mae=None, mape=None,
mousetext=None, mouselink=None, mouseimag=None, mousetitle=None, mousediv=None,
labeled=True, view=True,
saveas=None, relpath=False, graphicsformat=['pdf']):
"""Generates a tiered slat diagram between model chemistries with
errors (or simply values) in list *data*, which is supplied as part of the
dictionary for each participating reaction, along with *dbse* and *rxn* keys
in argument *data*. The plot is labeled with *title* and each tier with
an element of *labels* and plotted at *xlimit* from the zero-line. If
*color* is None, slats are black, if 'sapt', colors are taken from *color*
key in *data* [0, 1]. Summary statistics *mae* are plotted on the
overbound side and relative statistics *mape* on the underbound side.
HTML code for mouseover if mousetext or mouselink or mouseimag specified
based on recipe of Andrew Dalke from
http://www.dalkescientific.com/writings/diary/archive/2005/04/24/interactive_html.html
"""
import random
import hashlib
import matplotlib.pyplot as plt
import numpy as np # only needed for missing data with mouseiness
# initialize tiers/wefts
Nweft = len(labels)
lenS = 0.2
gapT = 0.04
positions = range(-1, -1 * Nweft - 1, -1)
posnS = []
for weft in range(Nweft):
posnS.extend([positions[weft] + lenS, positions[weft] - lenS, None])
posnT = []
for weft in range(Nweft - 1):
posnT.extend([positions[weft] - lenS - gapT, positions[weft + 1] + lenS + gapT, None])
posnM = []
# initialize plot
fht = Nweft * 0.8
#fig, ax = plt.subplots(figsize=(12, fht))
fig, ax = plt.subplots(figsize=(11, fht))
plt.subplots_adjust(left=0.01, right=0.99, hspace=0.3)
plt.xlim([-xlimit, xlimit])
plt.ylim([-1 * Nweft - 1, 0])
plt.yticks([])
ax.set_frame_on(False)
if labeled:
ax.set_xticks([-0.5 * xlimit, -0.25 * xlimit, 0.0, 0.25 * xlimit, 0.5 * xlimit])
else:
ax.set_xticks([])
for tick in ax.xaxis.get_major_ticks():
tick.tick1line.set_markersize(0)
tick.tick2line.set_markersize(0)
# label plot and tiers
if labeled:
ax.text(-0.9 * xlimit, -0.25, title,
verticalalignment='bottom', horizontalalignment='left',
family='Times New Roman', weight='bold', fontsize=12)
for weft in labels:
ax.text(-0.9 * xlimit, -(1.2 + labels.index(weft)), weft,
verticalalignment='bottom', horizontalalignment='left',
family='Times New Roman', weight='bold', fontsize=18)
# plot reaction errors and threads
for rxn in data:
# preparation
xvals = rxn['data']
clr = segment_color(color, rxn['color'] if 'color' in rxn else None)
slat = []
for weft in range(Nweft):
slat.extend([xvals[weft], xvals[weft], None])
thread = []
for weft in range(Nweft - 1):
thread.extend([xvals[weft], xvals[weft + 1], None])
# plotting
if Nweft == 1:
ax.plot(slat, posnS, '|', color=clr, markersize=20.0, mew=1.5, solid_capstyle='round')
else:
ax.plot(slat, posnS, color=clr, linewidth=1.0, solid_capstyle='round')
ax.plot(thread, posnT, color=clr, linewidth=0.5, solid_capstyle='round', alpha=0.3)
# converting into screen coordinates for image map
# block not working for py3 or up-to-date mpl. better ways for html image map nowadays
#npxvals = [np.nan if val is None else val for val in xvals]
#xyscreen = ax.transData.transform(zip(npxvals, positions))
#xscreen, yscreen = zip(*xyscreen)
#posnM.extend(zip([rxn['db']] * Nweft, [rxn['sys']] * Nweft,
# npxvals, [rxn['show']] * Nweft, xscreen, yscreen))
# labeling
if not(mousetext or mouselink or mouseimag):
if labeled and len(data) < 200:
try:
toplblposn = next(item for item in xvals if item is not None)
botlblposn = next(item for item in reversed(xvals) if item is not None)
except StopIteration:
pass
else:
ax.text(toplblposn, -0.75 + 0.6 * random.random(), rxn['sys'],
verticalalignment='bottom', horizontalalignment='center',
family='Times New Roman', fontsize=8)
ax.text(botlblposn, -1 * Nweft - 0.75 + 0.6 * random.random(), rxn['sys'],
verticalalignment='bottom', horizontalalignment='center',
family='Times New Roman', fontsize=8)
# plot trimmings
if mae is not None:
ax.plot([-x for x in mae], positions, 's', color='black')
if labeled:
if mape is not None: # equivalent to MAE for a 10 kcal/mol IE
ax.plot([0.025 * x for x in mape], positions, 'o', color='black')
plt.axvline(0, color='#cccc00')
# save and show
pltuid = title + '_' + ('lbld' if labeled else 'bare') + '_' + hashlib.sha1((title + repr(labels) + repr(xlimit)).encode()).hexdigest()
pltfile = expand_saveas(saveas, pltuid, def_prefix='thread_', relpath=relpath)
files_saved = {}
for ext in graphicsformat:
savefile = pltfile + '.' + ext.lower()
plt.savefig(savefile, transparent=True, format=ext, bbox_inches='tight')
files_saved[ext.lower()] = savefile
if view:
plt.show()
if not (mousetext or mouselink or mouseimag):
plt.close()
return files_saved, None
else:
dpi = 80
img_width = fig.get_figwidth() * dpi
img_height = fig.get_figheight() * dpi
htmlcode = """<SCRIPT>\n"""
htmlcode += """function mouseshow(db, rxn, val, show) {\n"""
if mousetext or mouselink:
htmlcode += """ var cid = document.getElementById("cid");\n"""
if mousetext:
htmlcode += """ cid.innerHTML = %s;\n""" % (mousetext)
if mouselink:
htmlcode += """ cid.href = %s;\n""" % (mouselink)
if mouseimag:
htmlcode += """ var cmpd_img = document.getElementById("cmpd_img");\n"""
htmlcode += """ cmpd_img.src = %s;\n""" % (mouseimag)
htmlcode += """}\n"""
htmlcode += """</SCRIPT>\n"""
if mousediv:
htmlcode += """%s\n""" % (mousediv[0])
if mousetitle:
htmlcode += """%s <BR>""" % (mousetitle)
htmlcode += """<h4>Mouseover</h4><a id="cid"></a><br>\n"""
if mouseimag:
htmlcode += """<div class="text-center">"""
htmlcode += """<IMG ID="cmpd_img" WIDTH="%d" HEIGHT="%d">\n""" % (200, 160)
htmlcode += """</div>"""
if mousediv:
htmlcode += """%s\n""" % (mousediv[1])
#htmlcode += """<IMG SRC="%s" ismap usemap="#points" WIDTH="%d" HEIGHT="%d">\n""" % \
# (pltfile + '.png', img_width, img_height)
htmlcode += """<IMG SRC="%s" ismap usemap="#points" WIDTH="%d">\n""" % \
(pltfile + '.png', img_width)
htmlcode += """<MAP name="points">\n"""
# generating html image map code
# points sorted to avoid overlapping map areas that can overwhelm html for SSI
# y=0 on top for html and on bottom for mpl, so flip the numbers
posnM.sort(key=lambda tup: tup[2])
posnM.sort(key=lambda tup: tup[3])
last = (0, 0)
for dbse, rxn, val, show, x, y in posnM:
if val is None or val is np.nan:
continue
now = (int(x), int(y))
if now == last:
htmlcode += """<!-- map overlap! %s-%s %+.2f skipped -->\n""" % (dbse, rxn, val)
else:
htmlcode += """<AREA shape="rect" coords="%d,%d,%d,%d" onmouseover="javascript:mouseshow('%s', '%s', '%+.2f', '%s');">\n""" % \
(x - 2, img_height - y - 20,
x + 2, img_height - y + 20,
dbse, rxn, val, show)
last = now
htmlcode += """</MAP>\n"""
plt.close()
return files_saved, htmlcode
def ternary(sapt, title='', labeled=True, view=True,
            saveas=None, relpath=False, graphicsformat=None):
    """Takes array of arrays *sapt* in form [elst, indc, disp] and builds formatted
    two-triangle ternary diagrams. Either fully-readable or dotsonly depending
    on *labeled*. Saves in formats *graphicsformat* (default: ``['pdf']``).

    Returns a dict mapping each (lowercased) graphics extension to the
    filename it was saved under.
    """
    import hashlib
    import numpy as np
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    from matplotlib.path import Path
    import matplotlib.patches as patches

    # avoid the shared-mutable-default pitfall (was graphicsformat=['pdf'])
    if graphicsformat is None:
        graphicsformat = ['pdf']

    # initialize plot
    fig, ax = plt.subplots(figsize=(6, 3.6))
    plt.xlim([-0.75, 1.25])
    plt.ylim([-0.18, 1.02])
    plt.xticks([])
    plt.yticks([])
    ax.set_aspect('equal')

    if labeled:
        # form and color ternary triangles
        codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]
        pathPos = Path([(0., 0.), (1., 0.), (0.5, 0.866), (0., 0.)], codes)
        pathNeg = Path([(0., 0.), (-0.5, 0.866), (0.5, 0.866), (0., 0.)], codes)
        ax.add_patch(patches.PathPatch(pathPos, facecolor='white', lw=2))
        ax.add_patch(patches.PathPatch(pathNeg, facecolor='#fff5ee', lw=2))
        # form and color HB/MX/DD dividing lines
        ax.plot([0.667, 0.5], [0., 0.866], color='#eeb4b4', lw=0.5)
        ax.plot([-0.333, 0.5], [0.577, 0.866], color='#eeb4b4', lw=0.5)
        ax.plot([0.333, 0.5], [0., 0.866], color='#7ec0ee', lw=0.5)
        ax.plot([-0.167, 0.5], [0.289, 0.866], color='#7ec0ee', lw=0.5)
        # label corners
        ax.text(1.0, -0.15, u'Elst (\u2212)',
                verticalalignment='bottom', horizontalalignment='center',
                family='Times New Roman', weight='bold', fontsize=18)
        ax.text(0.5, 0.9, u'Ind (\u2212)',
                verticalalignment='bottom', horizontalalignment='center',
                family='Times New Roman', weight='bold', fontsize=18)
        ax.text(0.0, -0.15, u'Disp (\u2212)',
                verticalalignment='bottom', horizontalalignment='center',
                family='Times New Roman', weight='bold', fontsize=18)
        ax.text(-0.5, 0.9, u'Elst (+)',
                verticalalignment='bottom', horizontalalignment='center',
                family='Times New Roman', weight='bold', fontsize=18)

    xvals = []
    yvals = []
    cvals = []
    for sys in sapt:
        [elst, indc, disp] = sys
        # calc ternary posn and color
        # NOTE(review): assumes elst/indc/disp are not all zero, else this
        # divides by zero — confirm upstream guarantees
        Ftop = abs(indc) / (abs(elst) + abs(indc) + abs(disp))
        Fright = abs(elst) / (abs(elst) + abs(indc) + abs(disp))
        xdot = 0.5 * Ftop + Fright
        ydot = 0.866 * Ftop
        cdot = 0.5 + (xdot - 0.5) / (1. - Ftop)
        if elst > 0.:
            # attractive-elst points go in the right triangle; positive elst
            # is remapped into the left (+) triangle
            xdot = 0.5 * (Ftop - Fright)
            ydot = 0.866 * (Ftop + Fright)
        #print elst, indc, disp, '', xdot, ydot, cdot
        xvals.append(xdot)
        yvals.append(ydot)
        cvals.append(cdot)

    sc = ax.scatter(xvals, yvals, c=cvals, s=15, marker="o",
                    cmap=mpl.cm.jet, edgecolor='none', vmin=0, vmax=1, zorder=10)

    # remove figure outline
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['bottom'].set_visible(False)
    ax.spines['left'].set_visible(False)

    # save and show
    pltuid = title + '_' + ('lbld' if labeled else 'bare') + '_' + hashlib.sha1((title + repr(sapt)).encode()).hexdigest()
    pltfile = expand_saveas(saveas, pltuid, def_prefix='tern_', relpath=relpath)
    files_saved = {}
    for ext in graphicsformat:
        savefile = pltfile + '.' + ext.lower()
        # 'frameon' kwarg dropped: it was removed from savefig in matplotlib >= 3.3
        plt.savefig(savefile, transparent=True, format=ext, bbox_inches='tight',
                    dpi=450, edgecolor='none', pad_inches=0.0)
        files_saved[ext.lower()] = savefile
    if view:
        plt.show()
    plt.close()
    return files_saved
#def thread_mouseover_web(pltfile, dbid, dbname, xmin, xmax, mcdats, labels, titles):
# """Saves a plot with name *pltfile* with a slat representation of
# the modelchems errors in *mcdat*. Mouseover shows geometry and error
# from *labels* based on recipe of Andrew Dalke from
# http://www.dalkescientific.com/writings/diary/archive/2005/04/24/interactive_html.html
#
# """
# from matplotlib.backends.backend_agg import FigureCanvasAgg
# import matplotlib
# import sapt_colors
#
# cmpd_width = 200
# cmpd_height = 160
#
# nplots = len(mcdats)
# fht = nplots * 0.8
# fht = nplots * 0.8 * 1.4
# fig = matplotlib.figure.Figure(figsize=(12.0, fht))
# fig.subplots_adjust(left=0.01, right=0.99, hspace=0.3, top=0.8, bottom=0.2)
# img_width = fig.get_figwidth() * 80
# img_height = fig.get_figheight() * 80
#
# htmlcode = """
#<SCRIPT>
#function mouseandshow(name, id, db, dbname) {
# var cid = document.getElementById("cid");
# cid.innerHTML = name;
# cid.href = "fragmentviewer.py?name=" + id + "&dataset=" + db;
# var cmpd_img = document.getElementById("cmpd_img");
# cmpd_img.src = dbname + "/dimers/" + id + ".png";
#}
#</SCRIPT>
#
#Distribution of Fragment Errors in Interaction Energy (kcal/mol)<BR>
#Mouseover:<BR><a id="cid"></a><br>
#<IMG SRC="scratch/%s" ismap usemap="#points" WIDTH="%d" HEIGHT="%d">
#<IMG ID="cmpd_img" WIDTH="%d" HEIGHT="%d">
#<MAP name="points">
#""" % (pltfile, img_width, img_height, cmpd_width, cmpd_height)
#
# for item in range(nplots):
# print '<br><br><br><br><br><br>'
# mcdat = mcdats[item]
# label = labels[item]
# tttle = titles[item]
#
# erdat = np.array(mcdat)
# # No masked_array because interferes with html map
# #erdat = np.ma.masked_array(mcdat, mask=mask)
# yvals = np.ones(len(mcdat))
# y = np.array([sapt_colors.sapt_colors[dbname][i] for i in label])
#
# ax = fig.add_subplot(nplots, 1, item + 1)
# sc = ax.scatter(erdat, yvals, c=y, s=3000, marker="|", cmap=matplotlib.cm.jet, vmin=0, vmax=1)
# ax.set_title(tttle, fontsize=8)
# ax.set_yticks([])
# lp = ax.plot([0, 0], [0.9, 1.1], color='#cccc00', lw=2)
# ax.set_ylim([0.95, 1.05])
# ax.text(xmin + 0.3, 1.0, stats(erdat), fontsize=7, family='monospace', verticalalignment='center')
# if item + 1 == nplots:
# ax.set_xticks([-12.0, -8.0, -4.0, -2.0, -1.0, 0.0, 1.0, 2.0, 4.0, 8.0, 12.0])
# for tick in ax.xaxis.get_major_ticks():
# tick.tick1line.set_markersize(0)
# tick.tick2line.set_markersize(0)
# else:
# ax.set_xticks([])
# ax.set_frame_on(False)
# ax.set_xlim([xmin, xmax])
#
# # Convert the data set points into screen space coordinates
# #xyscreencoords = ax.transData.transform(zip(erdat, yvals))
# xyscreencoords = ax.transData.transform(zip(erdat, yvals))
# xcoords, ycoords = zip(*xyscreencoords)
#
# # HTML image coordinates have y=0 on the top. Matplotlib
# # has y=0 on the bottom. We'll need to flip the numbers
# for cid, x, y, er in zip(label, xcoords, ycoords, erdat):
# htmlcode += """<AREA shape="rect" coords="%d,%d,%d,%d" onmouseover="javascript:mouseandshow('%s %+.2f', '%s', %s, '%s');">\n""" % \
# (x - 2, img_height - y - 20, x + 2, img_height - y + 20, cid, er, cid, dbid, dbname)
#
# htmlcode += "</MAP>\n"
# canvas = FigureCanvasAgg(fig)
# canvas.print_figure('scratch/' + title, dpi=80, transparent=True)
#
# #plt.savefig('mplflat_' + title + '.pdf', bbox_inches='tight', transparent=True, format='PDF')
# #plt.savefig(os.environ['HOME'] + os.sep + 'mplflat_' + title + '.pdf', bbox_inches='tight', transparent=T rue, format='PDF')
#
# return htmlcode
def composition_tile(db, aa1, aa2):
    """Takes dictionary *db* of label, error pairs and amino acids *aa1*
    and *aa2* and returns a square array of all errors for that amino
    acid pair, buffered by zeros.

    Keys in *db* are expected in the BFDb form ``NNNXXX-NNNYYY-N``
    (e.g. ``008ILE-012LEU-1``); keys that do not match are skipped
    rather than raising AttributeError. The pair is matched in either
    order (aa1-aa2 or aa2-aa1).
    """
    import re
    import numpy as np

    # raw string so \d is a regex digit class, not a (now-invalid) str escape
    bfdbpattern = re.compile(r"\d\d\d([A-Z][A-Z][A-Z])-\d\d\d([A-Z][A-Z][A-Z])-\d")
    tiles = []
    for key, val in db.items():
        bfdbname = bfdbpattern.match(key)
        if bfdbname is None:
            # non-BFDb key: previously crashed on None.group(); now ignored
            continue
        if (bfdbname.group(1) == aa1 and bfdbname.group(2) == aa2) or \
           (bfdbname.group(2) == aa1 and bfdbname.group(1) == aa2):
            tiles.append(val)
    if not tiles:
        # fill in background when no data. only sensible for neutral center colormaps
        tiles = [0]
    # zero-pad up to the next perfect square so the result is dim x dim
    dim = int(np.ceil(np.sqrt(len(tiles))))
    pad = dim * dim - len(tiles)
    tiles += [0] * pad
    return np.reshape(np.array(tiles), (dim, dim))
def iowa(mcdat, mclbl, title='', xtitle='', xlimit=2.0, view=True,
         saveas=None, relpath=False, graphicsformat=None):
    """Saves a plot with (extensionless) name *pltfile* with an Iowa
    representation of the modelchems errors in *mcdat* for BBI/SSI-style
    *labels*.

    *mcdat* and *mclbl* are parallel sequences of errors and BFDb-style
    'NNNXXX-NNNYYY-N' labels; errors are grouped by amino-acid pair into a
    20x20 grid of heatmap tiles (via composition_tile). Saves in formats
    *graphicsformat* (default: ``['pdf']``) and returns a dict mapping
    each lowercased extension to its saved filename.
    """
    import numpy as np
    import hashlib
    import matplotlib
    import matplotlib.pyplot as plt

    # avoid the shared-mutable-default pitfall (was graphicsformat=['pdf'])
    if graphicsformat is None:
        graphicsformat = ['pdf']

    aa = ['ARG', 'HIE', 'LYS', 'ASP', 'GLU', 'SER', 'THR', 'ASN', 'GLN', 'CYS', 'MET', 'GLY', 'ALA', 'VAL', 'ILE', 'LEU', 'PRO', 'PHE', 'TYR', 'TRP']
    #aa = ['ILE', 'LEU', 'ASP', 'GLU', 'PHE']
    err = dict(zip(mclbl, mcdat))

    # handle for frame, overall axis
    fig, axt = plt.subplots(figsize=(6, 6))
    #axt.set_xticks([])  # for quick nolabel, whiteback
    #axt.set_yticks([])  # for quick nolabel, whiteback
    axt.set_xticks(np.arange(len(aa)) + 0.3, minor=False)
    axt.set_yticks(np.arange(len(aa)) + 0.3, minor=False)
    axt.invert_yaxis()
    axt.xaxis.tick_top()  # comment for quick nolabel, whiteback
    axt.set_xticklabels(aa, minor=False, rotation=60, size='small')  # comment for quick nolabel, whiteback
    axt.set_yticklabels(aa, minor=False, size='small')  # comment for quick nolabel, whiteback
    axt.xaxis.set_tick_params(width=0, length=0)
    axt.yaxis.set_tick_params(width=0, length=0)
    #axt.set_title('%s' % (title), fontsize=16, verticalalignment='bottom')
    #axt.text(10.0, -1.5, title, horizontalalignment='center', fontsize=16)

    # nill spacing between 20x20 heatmaps
    plt.subplots_adjust(hspace=0.001, wspace=0.001)
    index = 1
    for aa1 in aa:
        for aa2 in aa:
            # one sub-heatmap per amino-acid pair
            cb = composition_tile(err, aa1, aa2)
            ax = matplotlib.axes.Subplot(fig, len(aa), len(aa), index)
            fig.add_subplot(ax)
            heatmap = ax.pcolor(cb, vmin=-xlimit, vmax=xlimit, cmap=plt.cm.PRGn)
            ax.set_xticks([])
            ax.set_yticks([])
            index += 1
    #plt.title(title)

    # NOTE(review): these dividing lines presumably separate residue
    # classes on the 20x20 grid — confirm against the *aa* ordering
    axt.axvline(x=4.8, linewidth=5, color='k')
    axt.axvline(x=8.75, linewidth=5, color='k')
    axt.axvline(x=11.6, linewidth=5, color='k')
    axt.axhline(y=4.8, linewidth=5, color='k')
    axt.axhline(y=8.75, linewidth=5, color='k')
    axt.axhline(y=11.6, linewidth=5, color='k')
    axt.set_zorder(100)

    # save and show
    pltuid = title + '_' + hashlib.sha1((title + str(xlimit)).encode()).hexdigest()
    pltfile = expand_saveas(saveas, pltuid, def_prefix='iowa_', relpath=relpath)
    files_saved = {}
    for ext in graphicsformat:
        savefile = pltfile + '.' + ext.lower()
        plt.savefig(savefile, transparent=True, format=ext, bbox_inches='tight')
        #plt.savefig(savefile, transparent=False, format=ext, bbox_inches='tight')  # for quick nolabel, whiteback
        files_saved[ext.lower()] = savefile
    if view:
        plt.show()
    plt.close()
    return files_saved
def liliowa(mcdat, title='', xlimit=2.0, view=True,
            saveas=None, relpath=False, graphicsformat=None):
    """Saves a plot with a heatmap representation of *mcdat*.

    *mcdat* is a flat sequence of values; it is zero-padded up to the
    next perfect square and rendered as a small square heatmap. The
    input sequence is NOT modified. Saves in formats *graphicsformat*
    (default: ``['pdf']``) and returns a dict mapping each lowercased
    extension to its saved filename.
    """
    import numpy as np
    import hashlib
    import matplotlib
    import matplotlib.pyplot as plt

    # avoid the shared-mutable-default pitfall (was graphicsformat=['pdf'])
    if graphicsformat is None:
        graphicsformat = ['pdf']

    # handle for frame, overall axis
    fig, axt = plt.subplots(figsize=(1, 1))
    axt.set_xticks([])
    axt.set_yticks([])
    axt.invert_yaxis()
    axt.xaxis.set_tick_params(width=0, length=0)
    axt.yaxis.set_tick_params(width=0, length=0)
    axt.set_aspect('equal')

    # remove figure outline
    axt.spines['top'].set_visible(False)
    axt.spines['right'].set_visible(False)
    axt.spines['bottom'].set_visible(False)
    axt.spines['left'].set_visible(False)

    # copy before padding: the original aliased *mcdat* (tiles = mcdat)
    # and then extended the CALLER'S list in place with the zero padding
    tiles = list(mcdat)
    dim = int(np.ceil(np.sqrt(len(tiles))))
    pad = dim * dim - len(tiles)
    tiles += [0] * pad
    cb = np.reshape(np.array(tiles), (dim, dim))
    heatmap = axt.pcolor(cb, vmin=-xlimit, vmax=xlimit, cmap=plt.cm.PRGn)

    # save and show
    pltuid = title + '_' + hashlib.sha1((title + str(xlimit)).encode()).hexdigest()
    pltfile = expand_saveas(saveas, pltuid, def_prefix='liliowa_', relpath=relpath)
    files_saved = {}
    for ext in graphicsformat:
        savefile = pltfile + '.' + ext.lower()
        # 'frameon' kwarg dropped: it was removed from savefig in matplotlib >= 3.3
        plt.savefig(savefile, transparent=True, format=ext, bbox_inches='tight',
                    pad_inches=0.0)
        files_saved[ext.lower()] = savefile
    if view:
        plt.show()
    plt.close()
    return files_saved
if __name__ == "__main__":
    # Demo driver: exercises the module's plotting routines with small
    # hard-coded datasets when the file is run as a script.
    # 'data' lists hold one error value per model-chemistry column;
    # None entries mark missing values.
    merge_dats = [
        {'show':'a', 'db':'HSG', 'sys':'1', 'data':[0.3508, 0.1234, 0.0364, 0.0731, 0.0388]},
        {'show':'b', 'db':'HSG', 'sys':'3', 'data':[0.2036, -0.0736, -0.1650, -0.1380, -0.1806]},
        #{'show':'', 'db':'S22', 'sys':'14', 'data':[np.nan, -3.2144, np.nan, np.nan, np.nan]},
        {'show':'c', 'db':'S22', 'sys':'14', 'data':[None, -3.2144, None, None, None]},
        {'show':'d', 'db':'S22', 'sys':'15', 'data':[-1.5090, -2.5263, -2.9452, -2.8633, -3.1059]},
        {'show':'e', 'db':'S22', 'sys':'22', 'data':[0.3046, -0.2632, -0.5070, -0.4925, -0.6359]}]
    # thread (slat) diagram across five labeled columns
    threads(merge_dats, labels=['d', 't', 'dt', 'q', 'tq'], color='sapt',
        title='MP2-CPa[]z', mae=[0.25, 0.5, 0.5, 0.3, 1.0], mape=[20.1, 25, 15, 5.5, 3.6])
    more_dats = [
        {'mc':'MP2-CP-adz', 'data':[1.0, 0.8, 1.4, 1.6]},
        {'mc':'MP2-CP-adtz', 'data':[0.6, 0.2, 0.4, 0.6]},
        None,  # NOTE(review): None presumably inserts a gap between bar groups — confirm in bars()
        {'mc':'MP2-CP-adzagain', 'data':[1.0, 0.8, 1.4, 1.6]}]
    bars(more_dats, title='asdf')
    single_dats = [
        {'dbse':'HSG', 'sys':'1', 'data':[0.3508]},
        {'dbse':'HSG', 'sys':'3', 'data':[0.2036]},
        {'dbse':'S22', 'sys':'14', 'data':[None]},
        {'dbse':'S22', 'sys':'15', 'data':[-1.5090]},
        {'dbse':'S22', 'sys':'22', 'data':[0.3046]}]
    #flat(single_dats, color='sapt', title='fg_MP2_adz', mae=0.25, mape=20.1)
flat([{'sys': '1', 'color': 0.6933450559423702, 'data': [0.45730000000000004]}, {'sys': '2', 'color': 0.7627027688599753, 'data': [0.6231999999999998]}, {'sys': '3', 'color': 0.7579958735528617, 'data': [2.7624999999999993]}, {'sys': '4', 'color': 0.7560883254421639, 'data': [2.108600000000001]}, {'sys': '5', 'color': 0.7515161912065955, 'data': [2.2304999999999993]}, {'sys': '6', 'color': 0.7235223893438876, 'data': [1.3782000000000014]}, {'sys': '7', 'color': 0.7120099024225569, 'data': [1.9519000000000002]}, {'sys': '8', 'color': 0.13721565059144678, 'data': [0.13670000000000004]}, {'sys': '9', 'color': 0.3087395095814767, 'data': [0.2966]}, {'sys': '10', 'color': 0.25493207637105103, 'data': [-0.020199999999999996]}, {'sys': '11', 'color': 0.24093814608979347, 'data': [-1.5949999999999998]}, {'sys': '12', 'color': 0.3304746631959777, 'data': [-1.7422000000000004]}, {'sys': '13', 'color': 0.4156050644764822, 'data': [0.0011999999999989797]}, {'sys': '14', 'color': 0.2667207259626991, 'data': [-2.6083999999999996]}, {'sys': '15', 'color': 0.3767053567641695, 'data': [-1.5090000000000003]}, {'sys': '16', 'color': 0.5572641509433963, 'data': [0.10749999999999993]}, {'sys': '17', 'color': 0.4788598239641578, 'data': [0.29669999999999996]}, {'sys': '18', 'color': 0.3799031371351281, 'data': [0.10209999999999964]}, {'sys': '19', 'color': 0.5053227185999078, 'data': [0.16610000000000014]}, {'sys': '20', 'color': 0.2967660584483015, 'data': [-0.37739999999999974]}, {'sys': '21', 'color': 0.38836460733750316, 'data': [-0.4712000000000005]}, {'sys': '22', 'color': 0.5585849893078809, 'data': [0.30460000000000065]}, {'sys': 'BzBz_PD36-1.8', 'color': 0.1383351040559965, 'data': [-1.1921]}, {'sys': 'BzBz_PD34-2.0', 'color': 0.23086034843049832, 'data': [-1.367]}, {'sys': 'BzBz_T-5.2', 'color': 0.254318060864096, 'data': [-0.32230000000000025]}, {'sys': 'BzBz_T-5.1', 'color': 0.26598486566733337, 'data': [-0.3428]}, {'sys': 'BzBz_T-5.0', 'color': 0.28011258347610224, 'data': 
[-0.36060000000000025]}, {'sys': 'PyPy_S2-3.9', 'color': 0.14520332101084785, 'data': [-0.9853000000000001]}, {'sys': 'PyPy_S2-3.8', 'color': 0.1690757103699542, 'data': [-1.0932]}, {'sys': 'PyPy_S2-3.5', 'color': 0.25615734567417053, 'data': [-1.4617]}, {'sys': 'PyPy_S2-3.7', 'color': 0.19566550224566906, 'data': [-1.2103999999999995]}, {'sys': 'PyPy_S2-3.6', 'color': 0.22476748600170826, 'data': [-1.3333]}, {'sys': 'BzBz_PD32-2.0', 'color': 0.31605681987208084, 'data': [-1.6637]}, {'sys': 'BzBz_T-4.8', 'color': 0.31533827331543723, 'data': [-0.38759999999999994]}, {'sys': 'BzBz_T-4.9', 'color': 0.2966146678069063, 'data': [-0.3759999999999999]}, {'sys': 'BzH2S-3.6', 'color': 0.38284814928043304, 'data': [-0.1886000000000001]}, {'sys': 'BzBz_PD32-1.7', 'color': 0.3128835191478639, 'data': [-1.8703999999999998]}, {'sys': 'BzMe-3.8', 'color': 0.24117892478245323, 'data': [-0.034399999999999986]}, {'sys': 'BzMe-3.9', 'color': 0.22230903086047088, 'data': [-0.046499999999999986]}, {'sys': 'BzH2S-3.7', 'color': 0.36724255203373696, 'data': [-0.21039999999999992]}, {'sys': 'BzMe-3.6', 'color': 0.284901522674611, 'data': [0.007099999999999884]}, {'sys': 'BzMe-3.7', 'color': 0.2621086166558813, 'data': [-0.01770000000000005]}, {'sys': 'BzBz_PD32-1.9', 'color': 0.314711251903219, 'data': [-1.7353999999999998]}, {'sys': 'BzBz_PD32-1.8', 'color': 0.3136181753200793, 'data': [-1.8039999999999998]}, {'sys': 'BzH2S-3.8', 'color': 0.3542001591399945, 'data': [-0.22230000000000016]}, {'sys': 'BzBz_PD36-1.9', 'color': 0.14128552184232473, 'data': [-1.1517]}, {'sys': 'BzBz_S-3.7', 'color': 0.08862098445220466, 'data': [-1.3414]}, {'sys': 'BzH2S-4.0', 'color': 0.33637540012259076, 'data': [-0.2265999999999999]}, {'sys': 'BzBz_PD36-1.5', 'color': 0.13203548045236127, 'data': [-1.3035]}, {'sys': 'BzBz_S-3.8', 'color': 0.0335358832178858, 'data': [-1.2022]}, {'sys': 'BzBz_S-3.9', 'color': 0.021704594689389095, 'data': [-1.0747]}, {'sys': 'PyPy_T3-5.1', 'color': 0.3207725129126432, 
'data': [-0.2958000000000003]}, {'sys': 'PyPy_T3-5.0', 'color': 0.3254925304351165, 'data': [-0.30710000000000015]}, {'sys': 'BzBz_PD36-1.7', 'color': 0.13577087141986593, 'data': [-1.2333000000000003]}, {'sys': 'PyPy_T3-4.8', 'color': 0.3443704059902452, 'data': [-0.32010000000000005]}, {'sys': 'PyPy_T3-4.9', 'color': 0.3333442013628509, 'data': [-0.3158999999999996]}, {'sys': 'PyPy_T3-4.7', 'color': 0.35854000505665756, 'data': [-0.31530000000000014]}, {'sys': 'BzBz_PD36-1.6', 'color': 0.13364651314909243, 'data': [-1.2705000000000002]}, {'sys': 'BzMe-4.0', 'color': 0.20560117919562013, 'data': [-0.05389999999999984]}, {'sys': 'MeMe-3.6', 'color': 0.16934865900383142, 'data': [0.18420000000000003]}, {'sys': 'MeMe-3.7', 'color': 0.1422332591197123, 'data': [0.14680000000000004]}, {'sys': 'MeMe-3.4', 'color': 0.23032794290360467, 'data': [0.29279999999999995]}, {'sys': 'MeMe-3.5', 'color': 0.19879551978386897, 'data': [0.23260000000000003]}, {'sys': 'MeMe-3.8', 'color': 0.11744404936205816, 'data': [0.11680000000000001]}, {'sys': 'BzBz_PD34-1.7', 'color': 0.22537382457222138, 'data': [-1.5286999999999997]}, {'sys': 'BzBz_PD34-1.6', 'color': 0.22434088042760192, 'data': [-1.5754000000000001]}, {'sys': 'BzBz_PD32-2.2', 'color': 0.3189891685300601, 'data': [-1.5093999999999999]}, {'sys': 'BzBz_S-4.1', 'color': 0.10884135031532088, 'data': [-0.8547000000000002]}, {'sys': 'BzBz_S-4.0', 'color': 0.06911476296747143, 'data': [-0.9590000000000001]}, {'sys': 'BzBz_PD34-1.8', 'color': 0.22685419834431494, 'data': [-1.476]}, {'sys': 'BzBz_PD34-1.9', 'color': 0.2287079261672095, 'data': [-1.4223999999999997]}, {'sys': 'BzH2S-3.9', 'color': 0.3439077006047999, 'data': [-0.22739999999999982]}, {'sys': 'FaNNFaNN-4.1', 'color': 0.7512716174974567, 'data': [1.7188999999999997]}, {'sys': 'FaNNFaNN-4.0', 'color': 0.7531388297328865, 'data': [1.9555000000000007]}, {'sys': 'FaNNFaNN-4.3', 'color': 0.7478064149182957, 'data': [1.2514000000000003]}, {'sys': 'FaNNFaNN-4.2', 'color': 
0.7493794908838113, 'data': [1.4758000000000013]}, {'sys': 'FaOOFaON-4.0', 'color': 0.7589275618320565, 'data': [2.0586]}, {'sys': 'FaOOFaON-3.7', 'color': 0.7619465815742713, 'data': [3.3492999999999995]}, {'sys': 'FaOOFaON-3.9', 'color': 0.7593958895631474, 'data': [2.4471000000000007]}, {'sys': 'FaOOFaON-3.8', 'color': 0.7605108059280967, 'data': [2.8793999999999986]}, {'sys': 'FaONFaON-4.1', 'color': 0.7577459277014137, 'data': [1.8697999999999997]}, {'sys': 'FaOOFaON-3.6', 'color': 0.7633298028299997, 'data': [3.847599999999998]}, {'sys': 'FaNNFaNN-3.9', 'color': 0.7548200901251662, 'data': [2.2089]}, {'sys': 'FaONFaON-3.8', 'color': 0.7582294603551467, 'data': [2.967699999999999]}, {'sys': 'FaONFaON-3.9', 'color': 0.7575285282217349, 'data': [2.578900000000001]}, {'sys': 'FaONFaON-4.2', 'color': 0.7594549221042256, 'data': [1.5579999999999998]}, {'sys': 'FaOOFaNN-3.6', 'color': 0.7661655616885379, 'data': [3.701599999999999]}, {'sys': 'FaOOFaNN-3.7', 'color': 0.7671068376007428, 'data': [3.156500000000001]}, {'sys': 'FaOOFaNN-3.8', 'color': 0.766947626251711, 'data': [2.720700000000001]}, {'sys': 'FaONFaNN-3.9', 'color': 0.7569836601896789, 'data': [2.4281000000000006]}, {'sys': 'FaONFaNN-3.8', 'color': 0.758024548462959, 'data': [2.7561999999999998]}, {'sys': 'FaOOFaOO-3.6', 'color': 0.7623422640217077, 'data': [3.851800000000001]}, {'sys': 'FaOOFaOO-3.7', 'color': 0.7597430792159379, 'data': [3.2754999999999974]}, {'sys': 'FaOOFaOO-3.4', 'color': 0.7672554950739594, 'data': [5.193299999999999]}, {'sys': 'FaOOFaOO-3.5', 'color': 0.764908813123865, 'data': [4.491900000000001]}, {'sys': 'FaONFaNN-4.2', 'color': 0.7549212942233738, 'data': [1.534699999999999]}, {'sys': 'FaONFaNN-4.0', 'color': 0.7559404310956357, 'data': [2.1133000000000024]}, {'sys': 'FaONFaNN-4.1', 'color': 0.7551574698775625, 'data': [1.813900000000002]}, {'sys': 'FaONFaON-4.0', 'color': 0.7572064604483282, 'data': [2.2113999999999994]}, {'sys': 'FaOOFaOO-3.8', 'color': 0.7573810956831686, 
'data': [2.7634000000000007]}, {'sys': '1', 'color': 0.2784121805328983, 'data': [0.3508]}, {'sys': '2', 'color': 0.22013842798900166, 'data': [-0.034600000000000186]}, {'sys': '3', 'color': 0.12832496088281312, 'data': [0.20360000000000023]}, {'sys': '4', 'color': 0.6993695033529733, 'data': [1.9092000000000002]}, {'sys': '5', 'color': 0.7371192790053749, 'data': [1.656600000000001]}, {'sys': '6', 'color': 0.5367033190796172, 'data': [0.27970000000000006]}, {'sys': '7', 'color': 0.3014220615964802, 'data': [0.32289999999999974]}, {'sys': '8', 'color': 0.01605867807629261, 'data': [0.12199999999999994]}, {'sys': '9', 'color': 0.6106300539083558, 'data': [0.3075999999999999]}, {'sys': '10', 'color': 0.6146680031333968, 'data': [0.6436000000000002]}, {'sys': '11', 'color': 0.6139747851721759, 'data': [0.4551999999999996]}, {'sys': '12', 'color': 0.32122739401126593, 'data': [0.44260000000000005]}, {'sys': '13', 'color': 0.24678148099136055, 'data': [-0.11789999999999967]}, {'sys': '14', 'color': 0.23700950710597016, 'data': [0.42689999999999995]}, {'sys': '15', 'color': 0.23103396678138563, 'data': [0.3266]}, {'sys': '16', 'color': 0.1922070769654413, 'data': [0.0696000000000001]}, {'sys': '17', 'color': 0.19082151944747366, 'data': [0.11159999999999992]}, {'sys': '18', 'color': 0.2886200282444196, 'data': [0.4114]}, {'sys': '19', 'color': 0.23560171133945224, 'data': [-0.1392]}, {'sys': '20', 'color': 0.3268270751294533, 'data': [0.5593]}, {'sys': '21', 'color': 0.7324460869158442, 'data': [0.6806000000000001]}],
color='sapt', title='MP2-CP-adz', mae=1.21356003247, mape=24.6665886087, xlimit=4.0)
lin_dats = [-0.5, -0.4, -0.3, 0, .5, .8, 5]
lin_labs = ['008ILE-012LEU-1', '012LEU-085ASP-1', '004GLU-063LEU-2',
'011ILE-014PHE-1', '027GLU-031LEU-1', '038PHE-041ILE-1', '199LEU-202GLU-1']
iowa(lin_dats, lin_labs, title='ttl', xlimit=0.5)
figs = [0.22, 0.41, 0.14, 0.08, 0.47,
0, 0.38, 0.22, 0.10, 0.20,
0, 0, 0.13, 0.07, 0.25,
0, 0, 0, 0.06, 0.22,
0, 0, 0, 0, 0.69]
liliowa(figs, saveas='SSI-default-MP2-CP-aqz', xlimit=1.0)
disthist(lin_dats)
valerrdata = [{'color': 0.14255710779686612, 'db': 'NBC1', 'sys': 'BzBz_S-3.6', 'error': [0.027999999999999803], 'mcdata': -1.231, 'bmdata': -1.259, 'axis': 3.6}, {'color': 0.08862098445220466, 'db': 'NBC1', 'sys': 'BzBz_S-3.7', 'error': [0.02300000000000013], 'mcdata': -1.535, 'bmdata': -1.558, 'axis': 3.7}, {'color': 0.246634626511043, 'db': 'NBC1', 'sys': 'BzBz_S-3.4', 'error': [0.04200000000000001], 'mcdata': 0.189, 'bmdata': 0.147, 'axis': 3.4}, {'color': 0.19526236766857613, 'db': 'NBC1', 'sys': 'BzBz_S-3.5', 'error': [0.03500000000000003], 'mcdata': -0.689, 'bmdata': -0.724, 'axis': 3.5}, {'color': 0.3443039102164425, 'db': 'NBC1', 'sys': 'BzBz_S-3.2', 'error': [0.05999999999999961], 'mcdata': 3.522, 'bmdata': 3.462, 'axis': 3.2}, {'color': 0.29638827303466814, 'db': 'NBC1', 'sys': 'BzBz_S-3.3', 'error': [0.050999999999999934], 'mcdata': 1.535, 'bmdata': 1.484, 'axis': 3.3}, {'color': 0.42859228971962615, 'db': 'NBC1', 'sys': 'BzBz_S-6.0', 'error': [0.0020000000000000018], 'mcdata': -0.099, 'bmdata': -0.101, 'axis': 6.0}, {'color': 0.30970751839224836, 'db': 'NBC1', 'sys': 'BzBz_S-5.0', 'error': [0.0040000000000000036], 'mcdata': -0.542, 'bmdata': -0.546, 'axis': 5.0}, {'color': 0.3750832778147902, 'db': 'NBC1', 'sys': 'BzBz_S-5.5', 'error': [0.0030000000000000027], 'mcdata': -0.248, 'bmdata': -0.251, 'axis': 5.5}, {'color': 0.0335358832178858, 'db': 'NBC1', 'sys': 'BzBz_S-3.8', 'error': [0.019000000000000128], 'mcdata': -1.674, 'bmdata': -1.693, 'axis': 3.8}, {'color': 0.021704594689389095, 'db': 'NBC1', 'sys': 'BzBz_S-3.9', 'error': [0.016000000000000014], 'mcdata': -1.701, 'bmdata': -1.717, 'axis': 3.9}, {'color': 0.22096255119953187, 'db': 'NBC1', 'sys': 'BzBz_S-4.5', 'error': [0.008000000000000007], 'mcdata': -1.058, 'bmdata': -1.066, 'axis': 4.5}, {'color': 0.10884135031532088, 'db': 'NBC1', 'sys': 'BzBz_S-4.1', 'error': [0.01200000000000001], 'mcdata': -1.565, 'bmdata': -1.577, 'axis': 4.1}, {'color': 0.06911476296747143, 'db': 'NBC1', 'sys': 
'BzBz_S-4.0', 'error': [0.014000000000000012], 'mcdata': -1.655, 'bmdata': -1.669, 'axis': 4.0}, {'color': 0.14275218373289067, 'db': 'NBC1', 'sys': 'BzBz_S-4.2', 'error': [0.01100000000000012], 'mcdata': -1.448, 'bmdata': -1.459, 'axis': 4.2}, {'color': 0.4740372133275638, 'db': 'NBC1', 'sys': 'BzBz_S-6.5', 'error': [0.0010000000000000009], 'mcdata': -0.028, 'bmdata': -0.029, 'axis': 6.5}, {'color': 0.6672504378283713, 'db': 'NBC1', 'sys': 'BzBz_S-10.0', 'error': [0.0], 'mcdata': 0.018, 'bmdata': 0.018, 'axis': 10.0}]
valerr({'cat': valerrdata},
color='sapt', xtitle='Rang', title='aggh', graphicsformat=['png'])
| lgpl-3.0 |
feranick/SpectralMachine | Archive/20170609c/SpectraLearnPredict.py | 1 | 60594 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
**********************************************************
*
* SpectraLearnPredict
* Perform Machine Learning on Raman spectra.
* version: 20170609c
*
* Uses: Deep Neural Networks, TensorFlow, SVM, PCA, K-Means
*
* By: Nicola Ferralis <feranick@hotmail.com>
*
***********************************************************
'''
print(__doc__)
import matplotlib
if matplotlib.get_backend() == 'TkAgg':
matplotlib.use('Agg')
import numpy as np
import sys, os.path, getopt, glob, csv
from os.path import exists, splitext
from os import rename
from datetime import datetime, date
import random
#***************************************************************
''' Spectra normalization, preprocessing, model selection '''
#***************************************************************
class preprocDef:
    # Spectra normalization / preprocessing configuration.
    # Used as a plain namespace: attributes are read as preprocDef.<name>;
    # the class is never instantiated. The class body also runs setup code
    # (conditional sklearn import, status prints) at import time.
    Ynorm = True   # Normalize spectra (True: recommended)
    fullYnorm = False  # Normalize considering full range (False: recommended)
    StandardScalerFlag = True  # Standardize features by removing the mean and scaling to unit variance (sklearn)

    # normalization target and the x position (+/- delta) used for Ynorm
    YnormTo = 1
    YnormX = 1600
    YnormXdelta = 30

    # optionally restrict the energy region considered
    enRestrictRegion = False
    enLim1 = 450   # for now use indexes rather than actual Energy
    enLim2 = 550   # for now use indexes rather than actual Energy

    scrambleNoiseFlag = False # Adds random noise to spectra (False: recommended)
    scrambleNoiseOffset = 0.1

    if StandardScalerFlag:
        # scaler is shared by all consumers of this namespace class
        from sklearn.preprocessing import StandardScaler
        scaler = StandardScaler()

    #**********************************************
    ''' Calculation by limited number of points '''
    #**********************************************
    cherryPickEnPoint = False  # False recommended

    # selected energy points and their half-widths (parallel lists)
    enSel = [1050, 1150, 1220, 1270, 1330, 1410, 1480, 1590, 1620, 1650]
    enSelDelta = [2, 2, 2, 2, 10, 2, 2, 15, 5, 2]

    #enSel = [1220, 1270, 1590]
    #enSelDelta = [2, 2, 30]

    if(cherryPickEnPoint == True):
        # cherry-picking and region restriction are mutually exclusive
        enRestrictRegion = False
        print(' Calculation by limited number of points: ENABLED ')
        print(' THIS IS AN EXPERIMENTAL FEATURE \n')
        print(' Restricted range: DISABLED')
#**********************************************
''' Deep Neural Networks - sklearn'''
#**********************************************
class nnDef:
    # Deep Neural Network (sklearn MLP) configuration.
    # Namespace class: attributes are read as nnDef.<name>, never instantiated.
    runNN = True        # enable this learning method
    alwaysRetrain = False
    subsetCrossValid = False
    percentCrossValid = 0.10  # proportion of TEST data for cross validation
    iterCrossValid = 2

    numNeurons = 200  #default = 200

    # Optimizers: lbfgs (default), adam, sgd
    nnOptimizer = "lbfgs"

    # activation functions: http://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPClassifier.html
    # identity, logistic (sigmoid), tanh, relu
    activation_function = "tanh"

    # NOTE(review): presumably switches MLPClassifier -> MLPRegressor — confirm in training code
    MLPRegressor = False

    # threshold in % of probabilities for listing prediction results
    thresholdProbabilityPred = 0.001

    plotNN = True
    nnClassReport = False  # NOTE(review): presumably prints sklearn classification_report — confirm
#***********************************************************
''' Deep Neural Networks - tensorflow via DNNClassifier'''
#***********************************************************
class dnntfDef:
    # Configuration for the TensorFlow DNNClassifier run (see runDNNTF).
    runDNNTF = True  # master switch for the TensorFlow DNN predictor
    alwaysRetrain = False  # False: reuse the saved model directory when present
    subsetCrossValid = False  # True: train on random subsets, score on the held-out part
    percentCrossValid = 0.10  # proportion of TEST data for cross validation
    iterCrossValid = 1  # number of cross-validation iterations
    numNeurons = 200  # number of neurons per layer
    numHidlayers = 1  # number of hidden layers
    # Optimizers: Adagrad (recommended), Adam, Ftrl, Momentum, RMSProp, SGD
    # https://www.tensorflow.org/api_guides/python/train
    nnOptimizer = "Adagrad"
    # activation functions: https://www.tensorflow.org/api_guides/python/nn
    # relu, relu6, crelu, elu, softplus, softsign, dropout, bias_add
    # sigmoid, tanh
    activation_function = "tanh"
    trainingSteps = 1000  # number of training steps
    # threshold in % of probabilities for listing prediction results
    thresholdProbabilityPred = 0.01
    logCheckpoint = False  # True: set TF logging verbosity to INFO
    #*************************************************
    # Setup variables and definitions- do not change.
    #*************************************************
    hidden_layers = [numNeurons] * numHidlayers
    if runDNNTF == True:
        import tensorflow as tf
        # Resolve the activation callable by name via getattr instead of eval():
        # same result, but no string evaluation. sigmoid and tanh live in the
        # top-level tf namespace; the remaining activations live in tf.nn.
        if activation_function == "sigmoid" or activation_function == "tanh":
            activationFn = getattr(tf, activation_function)
        else:
            activationFn = getattr(tf.nn, activation_function)
#**********************************************
''' Support Vector Machines'''
#**********************************************
class svmDef:
    # Configuration for the Support Vector Machine run (see runSVM).
    runSVM = True  # master switch for the SVM predictor
    alwaysRetrain = False  # False: reuse a previously saved .svmModel.pkl when present
    subsetCrossValid = False  # True: train on random subsets, score on the held-out part
    percentCrossValid = 0.10  # proportion of TEST data for cross validation
    iterCrossValid = 2  # number of cross-validation iterations
    # threshold in % of probabilities for listing prediction results
    thresholdProbabilitySVMPred = 3
    ''' Training algorithm for SVM
        Use either 'linear' or 'rbf'
        ('rbf' for large number of features) '''
    Cfactor = 20  # SVC penalty parameter C
    kernel = 'rbf'
    showClasses = False  # True: print the list of trained classes
    plotSVM = True  # used by map mode to decide whether to plot prediction maps
    svmClassReport = False  # True: print sklearn classification report after training
#**********************************************
''' Principal component analysis (PCA) '''
#**********************************************
class pcaDef:
    # Configuration for Principal Component Analysis (see runPCA).
    runPCA = False  # master switch for the standalone PCA analysis
    customNumPCAComp = True  # False: use one component per unique class in the training set
    numPCAcomponents = 2  # used only when customNumPCAComp is True
#**********************************************
''' K-means '''
#**********************************************
class kmDef:
    # Configuration for K-Means clustering (see runKMmain / KmMap).
    runKM = False  # master switch for K-Means
    customNumKMComp = False  # False: use one cluster per unique class in the training set
    numKMcomponents = 20  # used only when customNumKMComp is True
    plotKM = False  # True: plot spectra belonging to the predicted cluster
    plotKMmaps = True  # True: plot K-Means prediction maps in map mode
#**********************************************
''' TensorFlow '''
#**********************************************
class tfDef:
    # Configuration for the basic TensorFlow model (see runTFbasic).
    runTF = False  # master switch for the basic TensorFlow predictor
    alwaysRetrain = False  # False: reuse a previously saved model when present
    alwaysImprove = False  # alwaysRetrain must be True for this to work
    subsetCrossValid = True  # True: train on random subsets, score on the held-out part
    percentCrossValid = 0.1  # proportion of TEST data for cross validation
    iterCrossValid = 2  # number of cross-validation iterations
    # threshold in % of probabilities for listing prediction results
    thresholdProbabilityTFPred = 30
    decayLearnRate = True  # True: decay the learning rate during training
    learnRate = 0.75  # (initial) learning rate
    plotMapTF = True  # used by map mode to decide whether to plot prediction maps
    plotClassDistribTF = False  # True: plot the class distribution
    enableTensorboard = False  # True: write TensorBoard summaries
#**********************************************
''' Plotting '''
#**********************************************
class plotDef:
    # Global plotting switches shared by the predictors.
    showProbPlot = False  # True: show per-class probability plots after prediction
    showPCAPlots = True  # True: show PCA loadings and score plots
    createTrainingDataPlot = False  # True: save a plot of the training spectra
    showTrainingDataPlot = False  # True: also display the training-data plot
    plotAllSpectra = True  # Set to false for extremely large training sets
    if plotAllSpectra == False:
        stepSpectraPlot = 100  # steps in the number of spectra to be plotted
#**********************************************
''' Multiprocessing '''
#**********************************************
multiproc = False  # True: LearnPredictBatch fans out files via multiprocessing.Pool
#**********************************************
''' Main '''
#**********************************************
def main():
    """Command-line entry point: parse the option flags and dispatch.

    Modes: -f/--file single-file prediction, -a/--accuracy cross-validation,
    -t/--traintf TensorFlow training, -m/--map map prediction, -b/--batch
    batch prediction, -p/--pca PCA, -k/--kmaps K-Means maps.

    Exits with status 2 (after printing usage) on any parse or run failure.
    NOTE: every handler assumes sys.argv[2] is the training file; the bare
    ``except:`` clauses of the original were narrowed to ``except Exception:``
    so that KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], "fatmbkph:", ["file", "accuracy", "traintf", "map", "batch", "kmaps", "pca", "help"])
    except getopt.GetoptError:
        # Unrecognized flag or missing option argument.
        usage()
        sys.exit(2)
    if opts == []:
        usage()
        sys.exit(2)
    print(" Using training file: ", sys.argv[2],"\n")
    for o, a in opts:
        if o in ("-f" , "--file"):
            try:
                LearnPredictFile(sys.argv[2], sys.argv[3])
            except Exception:
                usage()
                sys.exit(2)
        if o in ("-a" , "--accuracy"):
            print('\033[1m Running in cross validation mode for accuracy determination...\033[0m\n')
            # Force retraining on subsets for every enabled predictor.
            nnDef.alwaysRetrain = True
            nnDef.subsetCrossValid = True
            dnntfDef.alwaysRetrain = True
            dnntfDef.subsetCrossValid = True
            dnntfDef.logCheckpoint = True
            svmDef.alwaysRetrain = True
            svmDef.subsetCrossValid = True
            tfDef.alwaysRetrain = True
            tfDef.subsetCrossValid = True
            try:
                LearnPredictFile(sys.argv[2], sys.argv[3])
            except Exception:
                usage()
                sys.exit(2)
        if o in ("-t" , "--traintf"):
            # Optional third argument: number of training runs (default 1).
            if len(sys.argv) > 3:
                numRuns = int(sys.argv[3])
            else:
                numRuns = 1
            preprocDef.scrambleNoiseFlag = False
            try:
                TrainTF(sys.argv[2], int(numRuns))
            except Exception:
                usage()
                sys.exit(2)
        if o in ("-m" , "--map"):
            try:
                LearnPredictMap(sys.argv[2], sys.argv[3])
            except Exception:
                usage()
                sys.exit(2)
        if o in ("-b" , "--batch"):
            try:
                LearnPredictBatch(sys.argv[2])
            except Exception:
                usage()
                sys.exit(2)
        if o in ("-p" , "--pca"):
            # Optional third argument: number of PCA components.
            if len(sys.argv) > 3:
                numPCAcomp = int(sys.argv[3])
            else:
                numPCAcomp = pcaDef.numPCAcomponents
            try:
                runPCA(sys.argv[2], numPCAcomp)
            except Exception:
                usage()
                sys.exit(2)
        if o in ("-k" , "--kmaps"):
            # Optional third argument: number of K-Means clusters.
            if len(sys.argv) > 3:
                numKMcomp = int(sys.argv[3])
            else:
                numKMcomp = kmDef.numKMcomponents
            try:
                KmMap(sys.argv[2], numKMcomp)
            except Exception:
                usage()
                sys.exit(2)
#**********************************************
''' Learn and Predict - File'''
#**********************************************
def LearnPredictFile(learnFile, sampleFile):
    """Train on learnFile and predict the single spectrum in sampleFile.

    Runs every predictor whose config switch is enabled (NN, DNN-TF, SVM,
    basic TF, K-Means) plus optional PCA and training-data plots.
    """
    ''' Open and process training data '''
    En, Cl, A, YnormXind = readLearnFile(learnFile)
    learnFileRoot = os.path.splitext(learnFile)[0]
    ''' Run PCA '''
    if pcaDef.runPCA == True:
        runPCAmain(A, Cl, En)
    ''' Open prediction file '''
    R, Rx = readPredFile(sampleFile)
    ''' Preprocess prediction data '''
    # type=0: first (and only) spectrum — verbose preprocessing output.
    A, Cl, En, Aorig = preProcessNormLearningData(A, En, Cl, YnormXind, 0)
    R, Rorig = preProcessNormPredData(R, Rx, A, En, Cl, YnormXind, 0)
    ''' Run Neural Network - sklearn'''
    if nnDef.runNN == True:
        runNN(A, Cl, R, learnFileRoot)
    ''' Run Neural Network - TensorFlow'''
    if dnntfDef.runDNNTF == True:
        runDNNTF(A, Cl, R, learnFileRoot)
    ''' Run Support Vector Machines '''
    if svmDef.runSVM == True:
        runSVM(A, Cl, En, R, learnFileRoot)
    ''' Tensorflow '''
    if tfDef.runTF == True:
        runTFbasic(A,Cl,R, learnFileRoot)
    ''' Plot Training Data '''
    if plotDef.createTrainingDataPlot == True:
        plotTrainData(A, En, R, plotDef.plotAllSpectra, learnFileRoot)
    ''' Run K-Means '''
    if kmDef.runKM == True:
        runKMmain(A, Cl, En, R, Aorig, Rorig)
#**********************************************
''' Process - Batch'''
#**********************************************
def LearnPredictBatch(learnFile):
    """Predict every *.txt spectrum in the current directory (except learnFile).

    Results are appended to a timestamped summary CSV. With the module-level
    ``multiproc`` flag set, files are dispatched to a multiprocessing.Pool.
    """
    # NOTE(review): relies on ``datetime`` being the class (i.e. a top-of-file
    # "from datetime import datetime") — confirm against the file header.
    summary_filename = 'summary' + str(datetime.now().strftime('_%Y-%m-%d_%H-%M-%S.csv'))
    makeHeaderSummary(summary_filename, learnFile)
    ''' Open and process training data '''
    En, Cl, A, YnormXind = readLearnFile(learnFile)
    A, Cl, En, Aorig = preProcessNormLearningData(A, En, Cl, YnormXind, 0)
    if multiproc == True:
        import multiprocessing as mp
        p = mp.Pool()
        for f in glob.glob('*.txt'):
            if (f != learnFile):
                p.apply_async(processSingleBatch, args=(f, En, Cl, A, Aorig, YnormXind, summary_filename, learnFile))
        p.close()
        p.join()
    else:
        for f in glob.glob('*.txt'):
            if (f != learnFile):
                processSingleBatch(f, En, Cl, A, Aorig, YnormXind, summary_filename, learnFile)
def processSingleBatch(f, En, Cl, A, Aorig, YnormXind, summary_filename, learnFile):
    """Predict the single spectrum file ``f`` with every enabled predictor and
    append one row of results to the batch summary CSV."""
    print(' Processing file: \033[1m' + f + '\033[0m\n')
    R, Rx = readPredFile(f)
    summaryFile = [f]
    ''' Preprocess prediction data '''
    R, Rorig = preProcessNormPredData(R, Rx, A, En, Cl, YnormXind, 0)
    learnFileRoot = os.path.splitext(learnFile)[0]
    ''' Run Neural Network - sklearn'''
    if nnDef.runNN == True:
        nnPred, nnProb = runNN(A, Cl, R, learnFileRoot)
        summaryFile.extend([nnPred, nnProb])
        nnDef.alwaysRetrain = False  # train once, reuse for subsequent files
    ''' Run Neural Network - TensorFlow'''
    if dnntfDef.runDNNTF == True:
        dnntfPred, dnntfProb = runDNNTF(A, Cl, R, learnFileRoot)
        # BUGFIX: the original appended nnPred/nnProb here (copy-paste error),
        # which also raised NameError whenever runNN was disabled.
        summaryFile.extend([dnntfPred, dnntfProb])
        dnntfDef.alwaysRetrain = False
    ''' Run Support Vector Machines '''
    if svmDef.runSVM == True:
        svmPred, svmProb = runSVM(A, Cl, En, R, learnFileRoot)
        summaryFile.extend([svmPred, svmProb])
        svmDef.alwaysRetrain = False
    ''' Tensorflow '''
    if tfDef.runTF == True:
        tfPred, tfProb, tfAccur = runTFbasic(A,Cl,R, learnFileRoot)
        summaryFile.extend([tfPred, tfProb, tfAccur])
        # BUGFIX: the original set "tfDef.tfalwaysRetrain", which silently
        # created a new attribute instead of disabling retraining.
        tfDef.alwaysRetrain = False
    ''' Run K-Means '''
    if kmDef.runKM == True:
        kmDef.plotKM = False
        kmPred = runKMmain(A, Cl, En, R, Aorig, Rorig)
        summaryFile.extend([kmPred])
    with open(summary_filename, "a") as sum_file:
        csv_out=csv.writer(sum_file)
        csv_out.writerow(summaryFile)
#**********************************************
''' Learn and Predict - Maps'''
#**********************************************
def LearnPredictMap(learnFile, mapFile):
    """Train on learnFile and predict every spectrum in a 2D map file.

    Per-pixel predictions for each enabled predictor are saved via saveMap
    and finally rendered with plotMaps.
    """
    ''' Open and process training data '''
    En, Cl, A, YnormXind = readLearnFile(learnFile)
    learnFileRoot = os.path.splitext(learnFile)[0]
    ''' Open prediction map '''
    X, Y, R, Rx = readPredMap(mapFile)
    type = 0
    i = 0;
    # BUGFIX: the original chained assignment
    #   svmPred = nnPred = tfPred = kmPred = np.empty([X.shape[0]])
    # bound all names to the SAME buffer, so each predictor overwrote the
    # others' results; dnntfPred was never allocated at all (NameError).
    nnPred = np.empty([X.shape[0]])
    dnntfPred = np.empty([X.shape[0]])
    svmPred = np.empty([X.shape[0]])
    tfPred = np.empty([X.shape[0]])
    kmPred = np.empty([X.shape[0]])
    A, Cl, En, Aorig = preProcessNormLearningData(A, En, Cl, YnormXind, type)
    print(' Processing map...' )
    for r in R[:]:
        r, rorig = preProcessNormPredData(r, Rx, A, En, Cl, YnormXind, type)
        type = 1  # verbose preprocessing output only for the first pixel
        ''' Run Neural Network - sklearn'''
        if nnDef.runNN == True:
            nnPred[i], temp = runNN(A, Cl, r, learnFileRoot)
            saveMap(mapFile, 'NN', 'HC', nnPred[i], X[i], Y[i], True)
            nnDef.alwaysRetrain = False
        ''' Run Neural Network - TensorFlow'''
        # BUGFIX: this branch was gated on nnDef.runNN and reset
        # "dnnDef.alwaysRetrain" (an undefined name) — use dnntfDef throughout.
        if dnntfDef.runDNNTF == True:
            dnntfPred[i], temp = runDNNTF(A, Cl, r, learnFileRoot)
            saveMap(mapFile, 'DNN-TF', 'HC', dnntfPred[i], X[i], Y[i], True)
            dnntfDef.alwaysRetrain = False
        ''' Run Support Vector Machines '''
        if svmDef.runSVM == True:
            svmPred[i], temp = runSVM(A, Cl, En, r, learnFileRoot)
            saveMap(mapFile, 'svm', 'HC', svmPred[i], X[i], Y[i], True)
            svmDef.alwaysRetrain = False
        ''' Tensorflow '''
        if tfDef.runTF == True:
            tfPred[i], temp, temp = runTFbasic(A,Cl,r, learnFileRoot)
            saveMap(mapFile, 'TF', 'HC', tfPred[i], X[i], Y[i], True)
            tfDef.alwaysRetrain = False
        ''' Run K-Means '''
        if kmDef.runKM == True:
            kmDef.plotKM = False
            kmPred[i] = runKMmain(A, Cl, En, r, Aorig, rorig)
            saveMap(mapFile, 'KM', 'HC', kmPred[i], X[i], Y[i], True)
        i+=1
    if nnDef.plotNN == True and nnDef.runNN == True:
        plotMaps(X, Y, nnPred, 'Deep Neural networks - sklearn')
    # BUGFIX: the DNN-TF map plot was gated on the sklearn flags.
    if nnDef.plotNN == True and dnntfDef.runDNNTF == True:
        plotMaps(X, Y, dnntfPred, 'Deep Neural networks - tensorFlow')
    if svmDef.plotSVM == True and svmDef.runSVM == True:
        plotMaps(X, Y, svmPred, 'SVM')
    if tfDef.plotMapTF == True and tfDef.runTF == True:
        plotMaps(X, Y, tfPred, 'TensorFlow')
    if kmDef.plotKMmaps == True and kmDef.runKM == True:
        plotMaps(X, Y, kmPred, 'K-Means Prediction')
#********************************************************************************
''' Run Neural Network - sklearn '''
#********************************************************************************
def runNN(A, Cl, R, Root):
    """Train (or load) the sklearn MLP and predict the spectrum R.

    A: training matrix, Cl: class labels, R: spectrum to predict,
    Root: file root used for the pickled model name.
    Returns (predValue, predProb): predicted class and probability in %
    (classifier), or predicted value and R^2 (regressor).
    """
    from sklearn.neural_network import MLPClassifier, MLPRegressor
    # NOTE(review): sklearn.externals.joblib is deprecated in modern sklearn;
    # kept here to match the rest of the file.
    from sklearn.externals import joblib
    if nnDef.MLPRegressor is False:
        # BUGFIX: removed the dead no-op expression `Root+"/DNN-TF_"` that
        # preceded this assignment in the original.
        nnTrainedData = Root + '.nnModelC.pkl'
    else:
        nnTrainedData = Root + '.nnModelR.pkl'
    print('==========================================================================\n')
    print(' Running Neural Network: multi-layer perceptron (MLP)')
    print(' Number of neurons: Hidden layers:', nnDef.numNeurons)
    print(' Optimizer:',nnDef.nnOptimizer,', Activation Fn:',nnDef.activation_function)
    try:
        if nnDef.alwaysRetrain == False:
            # Existence check: opening the pickle verifies the model is there.
            with open(nnTrainedData):
                print(' Opening NN training model...\n')
            clf = joblib.load(nnTrainedData)
        else:
            raise ValueError('Force NN retraining.')
    except Exception:
        #**********************************************
        ''' Retrain training data if not available'''
        #**********************************************
        if nnDef.MLPRegressor is False:
            print(' Retraining NN model using MLP Classifier...')
            clf = MLPClassifier(solver=nnDef.nnOptimizer, alpha=1e-5, activation = nnDef.activation_function,
                                hidden_layer_sizes=(nnDef.numNeurons,), random_state=1)
        else:
            print(' Retraining NN model using MLP Regressor...')
            clf = MLPRegressor(solver=nnDef.nnOptimizer, alpha=1e-5, hidden_layer_sizes=(nnDef.numNeurons,), random_state=1)
            Cl = np.array(Cl,dtype=float)
        if nnDef.subsetCrossValid == True:
            print(" Iterating training using: ",str(nnDef.percentCrossValid*100), "% as test subset, iterating",str(nnDef.iterCrossValid)," time(s) ...\n")
            for i in range(nnDef.iterCrossValid):
                As, Cls, As_cv, Cls_cv = formatSubset(A, Cl, nnDef.percentCrossValid)
                clf.fit(As, Cls)
                if nnDef.MLPRegressor is False:
                    print(' Mean accuracy: ',100*clf.score(As_cv,Cls_cv),'%')
                else:
                    print(' Coefficient of determination R^2: ',clf.score(As_cv,Cls_cv))
        else:
            print(" Training on the full training dataset\n")
            clf.fit(A, Cl)
        joblib.dump(clf, nnTrainedData)
    if nnDef.MLPRegressor is False:
        prob = clf.predict_proba(R)[0].tolist()
        # Classes above the configured probability threshold, for the report.
        rosterPred = np.where(clf.predict_proba(R)[0]>nnDef.thresholdProbabilityPred/100)[0]
        print('\n ==============================')
        print(' \033[1mNN\033[0m - Probability >',str(nnDef.thresholdProbabilityPred),'%')
        print(' ==============================')
        print(' Prediction\tProbability [%]')
        for i in range(rosterPred.shape[0]):
            print(' ',str(np.unique(Cl)[rosterPred][i]),'\t\t',str('{:.4f}'.format(100*clf.predict_proba(R)[0][rosterPred][i])))
        print(' ==============================')
        predValue = clf.predict(R)[0]
        predProb = round(100*max(prob),4)
        print('\033[1m' + '\n Predicted classifier value (Deep Neural Networks - sklearn) = ' + str(predValue) +
              ' (probability = ' + str(predProb) + '%)\033[0m\n')
    else:
        Cl = np.array(Cl,dtype=float)
        predValue = clf.predict(R)[0]
        predProb = clf.score(A,Cl)
        print('\033[1m' + '\n Predicted regressor value (Deep Neural Networks - sklearn) = ' + str('{:.3f}'.format(predValue)) +
              ' (R^2 = ' + str('{:.5f}'.format(predProb)) + ')\033[0m\n')
    #**************************************
    ''' Neural Networks Classification Report '''
    #**************************************
    if nnDef.nnClassReport == True:
        print(' Neural Networks Classification Report\n')
        runClassReport(clf, A, Cl)
    #*************************
    ''' Plot probabilities '''
    #*************************
    if plotDef.showProbPlot == True:
        if nnDef.MLPRegressor is False:
            plotProb(clf, R)
    return predValue, predProb
#********************************************************************************
''' TensorFlow '''
''' Run SkFlow - DNN Classifier '''
''' https://www.tensorflow.org/api_docs/python/tf/contrib/learn/DNNClassifier'''
#********************************************************************************
''' Run DNNClassifier model training and evaluation via TensorFlow-skflow '''
#********************************************************************************
def runDNNTF(A, Cl, R, Root):
    """Train (or restore) a tf.contrib.learn DNNClassifier and predict R.

    A: training matrix, Cl: class labels, R: spectrum to predict,
    Root: file root used for the model directory name.
    Returns (predValue, predProb): the decoded predicted class label and its
    probability in %.
    """
    print('==========================================================================\n')
    print(' Running Deep Neural Networks: DNNClassifier - TensorFlow...')
    print(' Hidden layers:', dnntfDef.hidden_layers)
    print(' Optimizer:',dnntfDef.nnOptimizer,', Activation function:',dnntfDef.activation_function)
    import tensorflow as tf
    # NOTE(review): tensorflow.contrib.learn only exists in TF 1.x.
    import tensorflow.contrib.learn as skflow
    from sklearn import preprocessing
    if dnntfDef.logCheckpoint ==True:
        tf.logging.set_verbosity(tf.logging.INFO)
    if dnntfDef.alwaysRetrain == False:
        # Persistent model directory: TF restores from here on later runs.
        model_directory = Root + "/DNN-TF_" + str(dnntfDef.numHidlayers)+"x"+str(dnntfDef.numNeurons)
        print("\n Training model saved in: ", model_directory, "\n")
    else:
        model_directory = None
        print("\n Training model not saved\n")
    #**********************************************
    ''' Initialize Estimator and training data '''
    #**********************************************
    print(' Initializing TensorFlow...')
    tf.reset_default_graph()
    # Labels must be integer-encoded for DNNClassifier; the encoder is kept
    # so the prediction can be decoded back to the original class label.
    le = preprocessing.LabelEncoder()
    Cl2 = le.fit_transform(Cl)
    feature_columns = skflow.infer_real_valued_columns_from_input(A.astype(np.float32))
    clf = skflow.DNNClassifier(feature_columns=feature_columns, hidden_units=dnntfDef.hidden_layers,
                               optimizer=dnntfDef.nnOptimizer, n_classes=np.unique(Cl).size,
                               activation_fn=dnntfDef.activationFn, model_dir=model_directory)
    print("\n Number of training steps:",dnntfDef.trainingSteps)
    #**********************************************
    ''' Train '''
    #**********************************************
    if dnntfDef.subsetCrossValid == True:
        print(" Iterating training using: ",str(dnntfDef.percentCrossValid*100), "% as test subset, iterating",str(dnntfDef.iterCrossValid)," time(s) ...\n")
        for i in range(dnntfDef.iterCrossValid):
            As, Cl2s, As_cv, Cl2s_cv = formatSubset(A, Cl2, dnntfDef.percentCrossValid)
            clf.fit(input_fn=lambda: input_fn(As, Cl2s), steps=dnntfDef.trainingSteps)
            accuracy_score = clf.evaluate(input_fn=lambda: input_fn(As_cv, Cl2s_cv), steps=1)
            print("\n Accuracy: {:.2f}%".format(100*accuracy_score["accuracy"]))
            print(" Loss: {:.2f}".format(accuracy_score["loss"]))
            print(" Global step: {:.2f}\n".format(accuracy_score["global_step"]))
    else:
        print(" Training on the full training dataset\n")
        clf.fit(input_fn=lambda: input_fn(A, Cl2), steps=dnntfDef.trainingSteps)
    #**********************************************
    ''' Predict '''
    #**********************************************
    def input_fn_predict():
        # Prediction input: features only, no labels.
        x = tf.constant(R.astype(np.float32))
        return x
    pred_class = list(clf.predict_classes(input_fn=input_fn_predict))[0]
    predValue = le.inverse_transform(pred_class)
    prob = list(clf.predict_proba(input_fn=input_fn_predict))[0]
    predProb = round(100*prob[pred_class],2)
    # Classes above the configured probability threshold, for the report.
    rosterPred = np.where(prob>dnntfDef.thresholdProbabilityPred/100)[0]
    print('\n ================================')
    print(' \033[1mDNN-TF\033[0m - Probability >',str(dnntfDef.thresholdProbabilityPred),'%')
    print(' ================================')
    print(' Prediction\tProbability [%]')
    for i in range(rosterPred.shape[0]):
        print(' ',str(np.unique(Cl)[rosterPred][i]),'\t\t',str('{:.4f}'.format(100*prob[rosterPred][i])))
    print(' ================================')
    print('\033[1m' + '\n Predicted regressor value (Deep Neural Networks - TensorFlow) = ' + predValue +
          ' (probability = ' + str(predProb) + '%)\033[0m\n')
    return predValue, predProb
#**********************************************
''' Format input data for Estimator '''
#**********************************************
def input_fn(A, Cl2):
    """Adapt a (features, labels) pair of arrays into TensorFlow constants
    for use as an Estimator input_fn."""
    import tensorflow as tf
    features = tf.constant(A.astype(np.float32))
    labels = tf.constant(Cl2)
    return features, labels
#********************************************************************************
''' Run SVM '''
#********************************************************************************
def runSVM(A, Cl, En, R, Root):
    """Train (or load) an SVM classifier and predict the spectrum R.

    A: training matrix, Cl: class labels, En: energy axis (unused here, kept
    for signature consistency with the other predictors), R: spectrum to
    predict, Root: file root used for the pickled model name.
    Returns (predicted class, probability in %).
    """
    from sklearn import svm
    # NOTE(review): sklearn.externals.joblib is deprecated in modern sklearn;
    # kept here to match the rest of the file.
    from sklearn.externals import joblib
    svmTrainedData = Root + '.svmModel.pkl'
    print('==========================================================================\n')
    print(' Running Support Vector Machine (kernel: ' + svmDef.kernel + ')...')
    try:
        if svmDef.alwaysRetrain == False:
            # Existence check: opening the pickle verifies the model is there.
            with open(svmTrainedData):
                print(' Opening SVM training model...\n')
            clf = joblib.load(svmTrainedData)
        else:
            raise ValueError('Force retraining SVM model')
    except Exception:
        #**********************************************
        ''' Retrain training model if not available'''
        #**********************************************
        print(' Retraining SVM data...')
        clf = svm.SVC(C = svmDef.Cfactor, decision_function_shape = 'ovr', probability=True)
        if svmDef.subsetCrossValid == True:
            # BUGFIX: the original printed nnDef.percentCrossValid and
            # nnDef.iterCrossValid here — the sklearn-NN settings, not the
            # SVM settings actually used by the loop below.
            print(" Iterating training using: ",str(svmDef.percentCrossValid*100), "% as test subset, iterating",str(svmDef.iterCrossValid)," time(s) ...\n")
            for i in range(svmDef.iterCrossValid):
                As, Cls, As_cv, Cls_cv = formatSubset(A, Cl, svmDef.percentCrossValid)
                clf.fit(As, Cls)
                print(' Mean accuracy: ',100*clf.score(As_cv,Cls_cv),'%')
        else:
            print(" Training on the full training dataset\n")
            clf.fit(A,Cl)
            Z = clf.decision_function(A)
            print('\n Number of classes = ' + str(Z.shape[1]))
        joblib.dump(clf, svmTrainedData)
        if svmDef.showClasses == True:
            print(' List of classes: ' + str(clf.classes_))
    R_pred = clf.predict(R)
    prob = clf.predict_proba(R)[0].tolist()
    # Classes above the configured probability threshold, for the report.
    rosterPred = np.where(clf.predict_proba(R)[0]>svmDef.thresholdProbabilitySVMPred/100)[0]
    print('\n ==============================')
    print(' \033[1mSVM\033[0m - Probability >',str(svmDef.thresholdProbabilitySVMPred),'%')
    print(' ==============================')
    print(' Prediction\tProbability [%]')
    for i in range(rosterPred.shape[0]):
        print(' ',str(np.unique(Cl)[rosterPred][i]),'\t\t',str('{:.1f}'.format(100*clf.predict_proba(R)[0][rosterPred][i])))
    print(' ==============================')
    print('\033[1m' + '\n Predicted value (SVM) = ' + str(R_pred[0]) + ' (probability = ' +
          str(round(100*max(prob),1)) + '%)\033[0m\n')
    #**************************************
    ''' SVM Classification Report '''
    #**************************************
    if svmDef.svmClassReport == True:
        print(' SVM Classification Report \n')
        runClassReport(clf, A, Cl)
    #*************************
    ''' Plot probabilities '''
    #*************************
    if plotDef.showProbPlot == True:
        plotProb(clf, R)
    return R_pred[0], round(100*max(prob),1)
#********************************************************************************
''' Run PCA '''
''' Transform data:
pca.fit(data).transform(data)
Loading Vectors (eigenvectors):
pca.components_
Eigenvalues:
pca.explained_variance_ratio
'''
#********************************************************************************
def runPCA(learnFile, numPCAcomponents):
    """Run PCA on the training data and (optionally) plot loadings/scores.

    learnFile: training data file; numPCAcomponents: number of components,
    used only when pcaDef.customNumPCAComp is True (otherwise one component
    per unique class).
    """
    from sklearn.decomposition import PCA
    import matplotlib.pyplot as plt
    from matplotlib import cm
    ''' Open and process training data '''
    En, Cl, A, YnormXind = readLearnFile(learnFile)
    print('==========================================================================\n')
    print(' Running PCA...\n')
    print(' Number of unique identifiers in training data: ' + str(np.unique(Cl).shape[0]))
    if pcaDef.customNumPCAComp == False:
        numPCAcomp = np.unique(Cl).shape[0]
    else:
        numPCAcomp = numPCAcomponents
    print(' Number of Principal components: ' + str(numPCAcomp) + '\n')
    pca = PCA(n_components=numPCAcomp)
    A_r = pca.fit(A).transform(A)
    # Explained variance per component.
    for i in range(0,pca.components_.shape[0]):
        print(' Score PC ' + str(i) + ': ' + '{0:.0f}%'.format(pca.explained_variance_ratio_[i] * 100))
    print('')
    if plotDef.showPCAPlots == True:
        print(' Plotting Loadings and score plots... \n')
        #***************************
        ''' Plotting Loadings '''
        #***************************
        for i in range(0,pca.components_.shape[0]):
            plt.plot(En, pca.components_[i,:], label='PC' + str(i) + ' ({0:.0f}%)'.format(pca.explained_variance_ratio_[i] * 100))
        plt.plot((En[0], En[En.shape[0]-1]), (0.0, 0.0), 'k--')
        plt.title('Loadings plot')
        plt.xlabel('Raman shift [1/cm]')
        plt.ylabel('Principal component')
        plt.legend()
        plt.figure()
        #***************************
        ''' Plotting Scores '''
        #***************************
        # Map each sample's class label to a color index: Cl_labels collects
        # the unique labels in order of first appearance, Cl_ind the index of
        # each sample's label within Cl_labels.
        Cl_ind = np.zeros(len(Cl))
        Cl_labels = np.zeros(0)
        ind = np.zeros(np.unique(Cl).shape[0])
        for i in range(len(Cl)):
            if (np.in1d(Cl[i], Cl_labels, invert=True)):
                Cl_labels = np.append(Cl_labels, Cl[i])
        for i in range(len(Cl)):
            Cl_ind[i] = np.where(Cl_labels == Cl[i])[0][0]
        colors = [ cm.jet(x) for x in np.linspace(0, 1, ind.shape[0]) ]
        for color, i, target_name in zip(colors, range(ind.shape[0]), Cl_labels):
            plt.scatter(A_r[Cl_ind==i,0], A_r[Cl_ind==i,1], color=color, alpha=.8, lw=2, label=target_name)
        plt.title('Score plot')
        plt.xlabel('PC 0 ({0:.0f}%)'.format(pca.explained_variance_ratio_[0] * 100))
        plt.ylabel('PC 1 ({0:.0f}%)'.format(pca.explained_variance_ratio_[1] * 100))
        plt.figure()
        plt.title('Score box plot')
        plt.xlabel('Principal Component')
        plt.ylabel('Score')
        for j in range(pca.components_.shape[0]):
            for color, i, target_name in zip(colors, range(ind.shape[0]), Cl_labels):
                plt.scatter([j+1]*len(A_r[Cl_ind==i,j]), A_r[Cl_ind==i,j], color=color, alpha=.8, lw=2, label=target_name)
        plt.boxplot(A_r)
        plt.figure()
        #******************************
        ''' Plotting Scores vs H:C '''
        #******************************
        for j in range(pca.components_.shape[0]):
            for color, i, target_name in zip(colors, range(ind.shape[0]), Cl_labels):
                plt.scatter(np.asarray(Cl)[Cl_ind==i], A_r[Cl_ind==i,j], color=color, alpha=.8, lw=2, label=target_name)
            plt.xlabel('H:C elemental ratio')
            plt.ylabel('PC ' + str(j) + ' ({0:.0f}%)'.format(pca.explained_variance_ratio_[j] * 100))
            plt.figure()
        plt.show()
#********************
''' Run K-Means '''
#********************
def runKMmain(A, Cl, En, R, Aorig, Rorig):
    """Cluster the training data with K-Means and predict R's cluster.

    Returns the predicted cluster index for R. When kmDef.plotKM is set,
    plots the original spectra that belong to the predicted cluster.
    """
    from sklearn.cluster import KMeans
    print('==========================================================================\n')
    print(' Running K-Means...')
    print(' Number of unique identifiers in training data: ' + str(np.unique(Cl).shape[0]))
    if kmDef.customNumKMComp == False:
        numKMcomp = np.unique(Cl).shape[0]
    else:
        numKMcomp = kmDef.numKMcomponents
    kmeans = KMeans(n_clusters=numKMcomp, random_state=0).fit(A)
    # Compute the prediction once and reuse it (the original recomputed
    # kmeans.predict(R) in three separate places).
    predCluster = kmeans.predict(R)[0]
    '''
    for i in range(0, numKMcomp):
        print('\n Class: ' + str(i) + '\n ',end="")
        for j in range(0,kmeans.labels_.shape[0]):
            if kmeans.labels_[j] == i:
                print(' ' + str(Cl[j]), end="")
    '''
    print('\n ==============================')
    print(' \033[1mKM\033[0m - Predicted class: \033[1m',str(predCluster),'\033[0m')
    print(' ==============================')
    print(' Prediction')
    for j in range(0,kmeans.labels_.shape[0]):
        # BUGFIX: the original compared against the hard-coded cluster 22
        # (a debugging leftover), so the listed members did not correspond
        # to the predicted cluster.
        if kmeans.labels_[j] == predCluster:
            print(' ' + str(Cl[j]))
    print(' ==============================\n')
    if kmDef.plotKM == True:
        import matplotlib.pyplot as plt
        for j in range(0,kmeans.labels_.shape[0]):
            if kmeans.labels_[j] == predCluster:
                plt.plot(En, Aorig[j,:])
        plt.plot(En, Rorig[0,:], linewidth = 2, label='Predict')
        plt.title('K-Means')
        plt.xlabel('Raman shift [1/cm]')
        plt.ylabel('Intensity')
        plt.legend()
        plt.show()
    return predCluster
#**********************************************
''' K-Means - Maps'''
#**********************************************
def KmMap(mapFile, numKMcomp):
    """Cluster the spectra of a 2D map file with K-Means and save class maps.

    mapFile: map data file; numKMcomp: number of clusters to use.
    """
    ''' Open prediction map '''
    X, Y, R, Rx = readPredMap(mapFile)
    type = 0
    i = 0;
    R, Rx, Rorig = preProcessNormMap(R, Rx, type)
    from sklearn.cluster import KMeans
    print(' Running K-Means...')
    print(' Number of classes: ' + str(numKMcomp))
    # BUGFIX: the original passed kmDef.numKMcomponents here, silently
    # ignoring the numKMcomp argument it had just announced.
    kmeans = KMeans(n_clusters=numKMcomp, random_state=0).fit(R)
    kmPred = np.empty([R.shape[0]])
    for i in range(0, R.shape[0]):
        kmPred[i] = kmeans.predict(R[i,:].reshape(1,-1))[0]
        saveMap(mapFile, 'KM', 'Class', int(kmPred[i]), X[i], Y[i], True)
        if kmPred[i] in kmeans.labels_:
            # First occurrence of a class: write the energy-axis header row.
            if os.path.isfile(saveMapName(mapFile, 'KM', 'Class_'+ str(int(kmPred[i]))+'-'+str(np.unique(kmeans.labels_).shape[0]), False)) == False:
                saveMap(mapFile, 'KM', 'Class_'+ str(int(kmPred[i])) + '-'+str(np.unique(kmeans.labels_).shape[0]) , '\t'.join(map(str, Rx)), ' ', ' ', False)
            # BUGFIX: the original wrote R[1,:] (always the second spectrum)
            # for every map point; each point should save its own spectrum.
            saveMap(mapFile, 'KM', 'Class_'+ str(int(kmPred[i])) + '-'+str(np.unique(kmeans.labels_).shape[0]) , '\t'.join(map(str, R[i,:])), X[i], Y[i], False)
    if kmDef.plotKM == True:
        plotMaps(X, Y, kmPred, 'K-Means')
#************************************
''' Read Learning file '''
#************************************
def readLearnFile(learnFile):
    """Load the training file into (En, Cl, A, YnormXind).

    File layout: first row = [0, energy axis...]; every following row =
    [class label, intensities...].
    Returns:
      En -- energy axis (1D array)
      Cl -- class labels formatted as '%.2f' strings
      A  -- intensity matrix (rows = spectra)
      YnormXind -- index list of the energy window used for Y normalization
    Returns None (after printing a message) if the file cannot be read.
    """
    try:
        with open(learnFile, 'r') as f:
            M = np.loadtxt(f, unpack =False)
    except Exception:
        print('\033[1m' + ' Map data file not found \n' + '\033[0m')
        return
    En = np.delete(np.array(M[0,:]),np.s_[0:1],0)
    M = np.delete(M,np.s_[0:1],0)
    Cl = ['{:.2f}'.format(x) for x in M[:,0]]
    A = np.delete(M,np.s_[0:1],1)
    if preprocDef.cherryPickEnPoint == True and preprocDef.enRestrictRegion == False:
        # BUGFIX: Atemp was built unconditionally in the original, doing
        # useless work (and risking an index error) when cherry-picking is off.
        Atemp = A[:,range(len(preprocDef.enSel))]
        enPoints = list(preprocDef.enSel)
        enRange = list(preprocDef.enSel)
        for i in range(0, len(preprocDef.enSel)):
            # Indexes of the energy window around each selected point; keep
            # the in-window maximum for every spectrum.
            enRange[i] = np.where((En<float(preprocDef.enSel[i]+preprocDef.enSelDelta[i])) & (En>float(preprocDef.enSel[i]-preprocDef.enSelDelta[i])))[0].tolist()
            for j in range(0, A.shape[0]):
                Atemp[j,i] = A[j,A[j,enRange[i]].tolist().index(max(A[j, enRange[i]].tolist()))+enRange[i][0]]
            enPoints[i] = int(np.average(enRange[i]))
        A = Atemp
        En = En[enPoints]
        # BUGFIX: the original guarded this with "if type == 0", but no local
        # "type" exists in this function — it compared the *builtin* type to 0
        # (always False), so the message never printed. (Also fixed the
        # "Cheery" typo.)
        print( ' Cherry picking points in the spectra\n')
    # Find index corresponding to energy value to be used for Y normalization
    if preprocDef.fullYnorm == True:
        YnormXind = np.where(En>0)[0].tolist()
    else:
        YnormXind_temp = np.where((En<float(preprocDef.YnormX+preprocDef.YnormXdelta)) & (En>float(preprocDef.YnormX-preprocDef.YnormXdelta)))[0].tolist()
        if YnormXind_temp == []:
            print( ' Renormalization region out of requested range. Normalizing over full range...\n')
            YnormXind = np.where(En>0)[0].tolist()
        else:
            YnormXind = YnormXind_temp
    print(' Number of datapoints = ' + str(A.shape[0]))
    print(' Size of each datapoint = ' + str(A.shape[1]) + '\n')
    return En, Cl, A, YnormXind
#**********************************************
''' Open prediction file '''
#**********************************************
def readPredFile(sampleFile):
    """Load a single-spectrum prediction file into (R, Rx).

    File layout (via np.loadtxt unpack=True): column 0 = energy axis (Rx),
    column 1 = intensities (R). When cherry-picking is enabled, keeps only
    the in-window maxima around the selected energy points.
    Returns None (after printing a message) if the file cannot be read.
    """
    try:
        with open(sampleFile, 'r') as f:
            print(' Opening sample data for prediction...')
            Rtot = np.loadtxt(f, unpack =True)
    except Exception:
        # BUGFIX: was a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit.
        print('\033[1m' + '\n Sample data file not found \n ' + '\033[0m')
        return
    R=Rtot[1,:]
    Rx=Rtot[0,:]
    if preprocDef.cherryPickEnPoint == True and preprocDef.enRestrictRegion == False:
        Rtemp = R[range(len(preprocDef.enSel))]
        enPoints = list(preprocDef.enSel)
        enRange = list(preprocDef.enSel)
        for i in range(0, len(preprocDef.enSel)):
            # Window around each selected energy point; keep its maximum.
            enRange[i] = np.where((Rx<float(preprocDef.enSel[i]+preprocDef.enSelDelta[i])) & (Rx>float(preprocDef.enSel[i]-preprocDef.enSelDelta[i])))[0].tolist()
            Rtemp[i] = R[R[enRange[i]].tolist().index(max(R[enRange[i]].tolist()))+enRange[i][0]]
            enPoints[i] = int(np.average(enRange[i]))
        R = Rtemp
        Rx = Rx[enPoints]
    return R, Rx
#**********************************************************************************
''' Preprocess Learning data '''
#**********************************************************************************
def preProcessNormLearningData(A, En, Cl, YnormXind, type):
    """Normalize/scale the training matrix according to preprocDef flags.

    A: training matrix (rows = spectra), En: energy axis, Cl: class labels,
    YnormXind: index window used for Y normalization, type: 0 for verbose
    output (first call), non-zero to suppress the informational prints.
    Returns (A, Cl, En, Aorig) where Aorig is a pre-normalization copy
    (possibly column-restricted).
    NOTE: the parameter name "type" shadows the builtin.
    """
    print(' Processing Training data file... ')
    #**********************************************************************************
    ''' Reformat x-axis in case it does not match that of the training data '''
    #**********************************************************************************
    if preprocDef.scrambleNoiseFlag == True:
        print(' Adding random noise to training set \n')
        scrambleNoise(A, preprocDef.scrambleNoiseOffset)
    # Keep an unnormalized copy for later plotting/K-Means.
    Aorig = np.copy(A)
    #**********************************************
    ''' Normalize/preprocess if flags are set '''
    #**********************************************
    if preprocDef.Ynorm == True:
        if type == 0:
            if preprocDef.fullYnorm == False:
                print(' Normalizing spectral intensity to: ' + str(preprocDef.YnormTo) + '; En = [' + str(preprocDef.YnormX-preprocDef.YnormXdelta) + ', ' + str(preprocDef.YnormX+preprocDef.YnormXdelta) + ']')
            else:
                print(' Normalizing spectral intensity to: ' + str(preprocDef.YnormTo) + '; to max intensity in spectra')
        for i in range(0,A.shape[0]):
            # Shift strictly positive, then scale so the maximum within the
            # YnormXind window equals YnormTo.
            if(np.amin(A[i]) <= 0):
                A[i,:] = A[i,:] - np.amin(A[i,:]) + 0.00001
            A[i,:] = np.multiply(A[i,:], preprocDef.YnormTo/A[i,A[i][YnormXind].tolist().index(max(A[i][YnormXind].tolist()))+YnormXind[0]])
    if preprocDef.StandardScalerFlag == True:
        print(' Using StandardScaler from sklearn ')
        # fit_transform here; preProcessNormPredData reuses the fitted scaler.
        A = preprocDef.scaler.fit_transform(A)
    #**********************************************
    ''' Energy normalization range '''
    #**********************************************
    if preprocDef.enRestrictRegion == True:
        A = A[:,range(preprocDef.enLim1, preprocDef.enLim2)]
        En = En[range(preprocDef.enLim1, preprocDef.enLim2)]
        Aorig = Aorig[:,range(preprocDef.enLim1, preprocDef.enLim2)]
        if type == 0:
            print( ' Restricting energy range between: [' + str(En[0]) + ', ' + str(En[En.shape[0]-1]) + ']\n')
    else:
        if type == 0:
            if(preprocDef.cherryPickEnPoint == True):
                print( ' Using selected spectral points:')
                print(En)
            else:
                print( ' Using full energy range: [' + str(En[0]) + ', ' + str(En[En.shape[0]-1]) + ']\n')
    return A, Cl, En, Aorig
#**********************************************************************************
''' Preprocess Prediction data '''
#**********************************************************************************
def preProcessNormPredData(R, Rx, A, En, Cl, YnormXind, type):
    """Preprocess one prediction spectrum so it matches the training data.

    :param R: raw intensity vector of the sample spectrum
    :param Rx: x-axis (energy values) of the sample spectrum
    :param A: training data matrix (rows = spectra)
    :param En: training x-axis
    :param Cl: training class labels (not used here; kept for signature parity)
    :param YnormXind: indices of the energy window used for Y normalization
    :param type: 0 enables verbose console output
    :return: (R, Rorig) - processed sample of shape (1, n) and a copy taken
             right after x-axis reformatting
    """
    print(' Processing Prediction data file... ')
    #**********************************************************************************
    ''' Reformat x-axis in case it does not match that of the training data '''
    #**********************************************************************************
    if(R.shape[0] != A.shape[1]):
        if type == 0:
            print('\033[1m' + ' WARNING: Different number of datapoints for the x-axis\n for training (' + str(A.shape[1]) + ') and sample (' + str(R.shape[0]) + ') data.\n Reformatting x-axis of sample data...\n' + '\033[0m')
        # Interpolate the sample onto the training energy axis
        R = np.interp(En, Rx, R)
    R = R.reshape(1,-1)
    Rorig = np.copy(R)

    #**********************************************
    ''' Normalize/preprocess if flags are set '''
    #**********************************************
    if preprocDef.Ynorm == True:
        if type == 0:
            if preprocDef.fullYnorm == False:
                print(' Normalizing spectral intensity to: ' + str(preprocDef.YnormTo) + '; En = [' + str(preprocDef.YnormX-preprocDef.YnormXdelta) + ', ' + str(preprocDef.YnormX+preprocDef.YnormXdelta) + ']')
            else:
                print(' Normalizing spectral intensity to: ' + str(preprocDef.YnormTo) + '; to max intensity in spectra')
        if(np.amin(R) <= 0):
            # Shift the spectrum above zero so multiplicative scaling is valid
            print(' Spectra max below zero detected')
            R[0,:] = R[0,:] - np.amin(R[0,:]) + 0.00001
        # Scale so the peak inside the YnormXind window equals YnormTo
        R[0,:] = np.multiply(R[0,:], preprocDef.YnormTo/R[0,R[0][YnormXind].tolist().index(max(R[0][YnormXind].tolist()))+YnormXind[0]])

    if preprocDef.StandardScalerFlag == True:
        print(' Using StandardScaler from sklearn ')
        # Reuse the scaler that was fitted on the training set
        R = preprocDef.scaler.transform(R)

    #**********************************************
    ''' Energy normalization range '''
    #**********************************************
    if preprocDef.enRestrictRegion == True:
        # Restrict training matrix, energy axis and sample to [enLim1, enLim2)
        A = A[:,range(preprocDef.enLim1, preprocDef.enLim2)]
        En = En[range(preprocDef.enLim1, preprocDef.enLim2)]
        R = R[:,range(preprocDef.enLim1, preprocDef.enLim2)]
        if type == 0:
            print( ' Restricting energy range between: [' + str(En[0]) + ', ' + str(En[En.shape[0]-1]) + ']\n')
    else:
        if type == 0:
            if(preprocDef.cherryPickEnPoint == True):
                print( ' Using selected spectral points:')
                print(En)
            else:
                print( ' Using full energy range: [' + str(En[0]) + ', ' + str(En[En.shape[0]-1]) + ']\n')
    return R, Rorig
#**********************************************************************************
''' Preprocess prediction data '''
#**********************************************************************************
def preProcessNormMap(A, En, type):
    """Preprocess a map dataset (Y normalization, scaling, energy restriction).

    :param A: map data matrix (rows = spectra)
    :param En: x-axis (energy values)
    :param type: 0 enables verbose console output
    :return: (A, En, Aorig) - processed data, (possibly restricted) x-axis and
             an unnormalized copy of the data
    """
    #**********************************************************************************
    ''' Reformat x-axis in case it does not match that of the training data '''
    #**********************************************************************************
    # Find index corresponding to energy value to be used for Y normalization
    if preprocDef.fullYnorm == False:
        YnormXind = np.where((En<float(preprocDef.YnormX+preprocDef.YnormXdelta)) & (En>float(preprocDef.YnormX-preprocDef.YnormXdelta)))[0].tolist()
    else:
        YnormXind = np.where(En>0)[0].tolist()
    # NOTE(review): YnormXind is computed but not used below (normalization
    # uses the per-spectrum maximum instead) — confirm upstream intent.
    Aorig = np.copy(A)

    #**********************************************
    ''' Normalize/preprocess if flags are set '''
    #**********************************************
    if preprocDef.Ynorm == True:
        if type == 0:
            print(' Normalizing spectral intensity to: ' + str(preprocDef.YnormTo) + '; En = [' + str(preprocDef.YnormX-preprocDef.YnormXdelta) + ', ' + str(preprocDef.YnormX+preprocDef.YnormXdelta) + ']')
        for i in range(0,A.shape[0]):
            # Scale each spectrum so its maximum equals YnormTo
            A[i,:] = np.multiply(A[i,:], preprocDef.YnormTo/np.amax(A[i]))

    if preprocDef.StandardScalerFlag == True:
        print(' Using StandardScaler from sklearn ')
        A = preprocDef.scaler.fit_transform(A)

    #**********************************************
    ''' Energy normalization range '''
    #**********************************************
    if preprocDef.enRestrictRegion == True:
        A = A[:,range(preprocDef.enLim1, preprocDef.enLim2)]
        En = En[range(preprocDef.enLim1, preprocDef.enLim2)]
        Aorig = Aorig[:,range(preprocDef.enLim1, preprocDef.enLim2)]
        if type == 0:
            print( ' Restricting energy range between: [' + str(En[0]) + ', ' + str(En[En.shape[0]-1]) + ']\n')
    else:
        if type == 0:
            print( ' Using full energy range: [' + str(En[0]) + ', ' + str(En[En.shape[0]-1]) + ']\n')
    return A, En, Aorig
####################################################################
''' Format subset of training data '''
####################################################################
def formatSubset(A, Cl, percent):
    """Split (A, Cl) into training and cross-validation subsets.

    `percent` is the fraction of samples held out for cross-validation.
    The split uses a fixed random_state, so repeated calls agree.

    :return: (A_train, Cl_train, A_cv, Cl_cv)
    """
    from sklearn.model_selection import train_test_split
    split = train_test_split(A, Cl, test_size=percent, random_state=42)
    trainA, cvA, trainCl, cvCl = split
    return trainA, trainCl, cvA, cvCl
####################################################################
''' Open map files '''
####################################################################
def readPredMap(mapFile):
    """Read a map file (Horiba LabSpec layout).

    The first line holds the energy axis; every following row is
    "x y intensity1 intensity2 ...". Assumes at least two data rows so
    np.loadtxt yields a 2-D array.

    :param mapFile: path to the map file
    :return: (X, Y, A, En) coordinate vectors, intensity matrix and energy
             axis, or None when the file cannot be read.
    """
    try:
        with open(mapFile, 'r') as f:
            En = np.array(f.readline().split(), dtype=np.dtype(float))
            A = np.loadtxt(f, unpack=False)
    except Exception:
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; narrowed while keeping the best-effort behavior.
        print('\033[1m' + ' Map data file not found \n' + '\033[0m')
        return
    # The first two columns are the (X, Y) map coordinates.
    X = A[:,0]
    Y = A[:,1]
    A = np.delete(A, np.s_[0:2], 1)
    print(' Shape map: ' + str(A.shape))
    return X, Y, A, En
####################################################################
''' Save map files '''
####################################################################
def saveMap(file, type, extension, s, x1, y1, comma):
    """Append one "(x, y, value)" row to the map file for this run.

    Rows are comma-separated when `comma` is True, tab-separated otherwise.
    """
    outName = saveMapName(file, type, extension, comma)
    sep = ',' if comma == True else '\t'
    with open(outName, "a") as coord_file:
        coord_file.write('{:}{:}'.format(x1, sep))
        coord_file.write('{:}{:}'.format(y1, sep))
        coord_file.write('{:}\n'.format(s))


def saveMapName(file, type, extension, comma):
    """Build the map filename: <root>_<type>-<extension>_map.{csv|txt}."""
    suffix = '_map.csv' if comma == True else '_map.txt'
    root = os.path.splitext(file)[0]
    return root + '_' + type + '-' + extension + suffix
#************************************
''' Plot Probabilities'''
#************************************
def plotProb(clf, R):
    """Print and plot per-class probabilities for sample R under classifier clf.

    :param clf: fitted classifier exposing predict_proba and classes_
    :param R: preprocessed sample of shape (1, n)
    """
    prob = clf.predict_proba(R)[0].tolist()
    print(' Probabilities of this sample within each class: \n')
    for i in range(0,clf.classes_.shape[0]):
        print(' ' + str(clf.classes_[i]) + ': ' + str(round(100*prob[i],2)) + '%')
    # Imported lazily so plotting is only required when this function runs
    import matplotlib.pyplot as plt
    print('\n Stand by: Plotting probabilities for each class... \n')
    plt.title('Probability density per class')
    for i in range(0, clf.classes_.shape[0]):
        # One point per class: probability expressed in percent
        plt.scatter(clf.classes_[i], round(100*prob[i],2), label='probability', c = 'red')
    plt.grid(True)
    plt.xlabel('Class')
    plt.ylabel('Probability [%]')
    plt.show()
#************************************
''' Plot Training data'''
#************************************
def plotTrainData(A, En, R, plotAllSpectra, learnFileRoot):
    """Plot training spectra plus the sample R and save a PNG.

    Fix: honor the `plotAllSpectra` argument. Previously the parameter was
    ignored in favor of the global `plotDef.plotAllSpectra`; the visible
    caller passes exactly that global, so behavior is unchanged for it.

    :param A: training data matrix (rows = spectra)
    :param En: x-axis (Raman shift)
    :param R: sample spectrum of shape (1, n)
    :param plotAllSpectra: True plots every spectrum, False every
                           plotDef.stepSpectraPlot-th spectrum
    :param learnFileRoot: base path for the saved PNG
    """
    import matplotlib.pyplot as plt
    if plotAllSpectra == True:
        step = 1
        learnFileRoot = learnFileRoot + '_full-set'
    else:
        step = plotDef.stepSpectraPlot
        learnFileRoot = learnFileRoot + '_partial-' + str(step)
    print(' Plotting Training dataset in: ' + learnFileRoot + '.png\n')
    if preprocDef.Ynorm ==True:
        plt.title('Normalized Training Data')
    else:
        plt.title('Training Data')
    for i in range(0,A.shape[0], step):
        plt.plot(En, A[i,:], label='Training data')
    plt.plot(En, R[0,:], linewidth = 4, label='Sample data')
    plt.xlabel('Raman shift [1/cm]')
    plt.ylabel('Raman Intensity [arb. units]')
    plt.savefig(learnFileRoot + '.png', dpi = 160, format = 'png')  # Save plot
    if plotDef.showTrainingDataPlot == True:
        plt.show()
    plt.close()
#************************************
''' Plot Processed Maps'''
#************************************
def plotMaps(X, Y, A, label):
    """Interpolate scattered map values A at coordinates (X, Y) and show them.

    :param X, Y: coordinate vectors of the map points
    :param A: value per map point
    :param label: title for the plot
    """
    print(' Plotting ' + label + ' Map...\n')
    import scipy.interpolate
    # Regular grid spanning the measured coordinate range
    xi = np.linspace(min(X), max(X))
    yi = np.linspace(min(Y), max(Y))
    xi, yi = np.meshgrid(xi, yi)
    # NOTE(review): the RBF is built on (Y, -X) rather than (X, Y) — this
    # looks like a deliberate axis swap/flip for display; confirm upstream.
    rbf = scipy.interpolate.Rbf(Y, -X, A, function='linear')
    zi = rbf(xi, yi)
    import matplotlib.pyplot as plt
    plt.imshow(zi, vmin=A.min(), vmax=A.max(), origin='lower',label='data',
               extent=[X.min(), X.max(), Y.min(), Y.max()])
    plt.title(label)
    plt.xlabel('X [um]')
    plt.ylabel('Y [um]')
    plt.show()
####################################################################
''' Make header, if absent, for the summary file '''
####################################################################
def makeHeaderSummary(file, learnFile):
    """Write the two header rows of the summary CSV if the file is new.

    Does nothing when `file` already exists, so repeated runs keep
    appending below a single header.
    """
    if os.path.isfile(file):
        return
    headerTop = ['Training File:', learnFile]
    headerCols = ['File','SVM-HC','SVM-Prob%', 'NN-HC', 'NN-Prob%', 'TF-HC', 'TF-Prob%', 'TF-Accuracy%']
    with open(file, "a") as sum_file:
        writer = csv.writer(sum_file)
        writer.writerow(headerTop)
        writer.writerow(headerCols)
#************************************
''' Lists the program usage '''
#************************************
def usage():
    """Print the command-line usage summary for SpectraLearnPredict."""
    messages = (
        '\n Usage:\n',
        ' Single files:',
        ' python3 SpectraLearnPredict.py -f <learningfile> <spectrafile> \n',
        ' Single files with cross-validation for accuracy determination: ',
        ' python3 SpectraLearnPredict.py -a <learningfile> <spectrafile> \n',
        ' Maps (formatted for Horiba LabSpec):',
        ' python3 SpectraLearnPredict.py -m <learningfile> <spectramap> \n',
        ' Batch txt files:',
        ' python3 SpectraLearnPredict.py -b <learningfile> \n',
        ' K-means on maps:',
        ' python3 SpectraLearnPredict.py -k <spectramap> <number_of_classes>\n',
        ' Principal component analysis on spectral collection files: ',
        ' python3 SpectraLearnPredict.py -p <spectrafile> <#comp>\n',
        ' Run tensorflow training only:',
        ' python3 SpectraLearnPredict.py -t <learningfile> <# iterations>\n',
        ' Requires python 3.x. Not compatible with python 2.x\n',
    )
    for message in messages:
        print(message)
#************************************
''' Info on Classification Report '''
#************************************
def runClassReport(clf, A, Cl):
    """Print sklearn's precision/recall/f1 report for clf on (A, Cl),
    followed by a short explanation of the metrics."""
    from sklearn.metrics import classification_report
    predictions = clf.predict(A)
    print(classification_report(Cl, predictions, target_names=clf.classes_))
    explanation = (' Precision is the probability that, given a classification result for a sample,\n' +
                   ' the sample actually belongs to that class. Recall (Accuracy) is the probability that a \n' +
                   ' sample will be correctly classified for a given class. f1-score combines both \n' +
                   ' accuracy and precision to give a single measure of relevancy of the classifier results.\n')
    print(explanation)
#************************************
''' Introduce Noise in Data '''
#************************************
def scrambleNoise(A, offset):
    """Add a random shift, drawn uniformly from [-offset, offset], to each
    column of A in place (the same shift for every row of a column)."""
    from random import uniform
    numCols = A.shape[1]
    for col in range(numCols):
        shift = offset * uniform(-1, 1)
        A[:, col] = A[:, col] + shift
#********************************************************************************
''' Tensorflow '''
''' https://www.tensorflow.org/get_started/mnist/beginners'''
#********************************************************************************
''' Setup training-only via TensorFlow '''
#********************************************************************************
def TrainTF(learnFile, numRuns):
    """Run `numRuns` TensorFlow-only training iterations on `learnFile`.

    A random spectrum from the training set serves as the evaluation sample;
    each iteration's accuracy, prediction and probability are appended to a
    timestamped summary log next to the training file.
    """
    learnFileRoot = os.path.splitext(learnFile)[0]
    summary_filename = learnFileRoot + '_summary-TF-training' + str(datetime.now().strftime('_%Y-%m-%d_%H-%M-%S.log'))
    # Force retraining/improvement on every iteration
    tfDef.alwaysRetrain = True
    tfDef.alwaysImprove = True

    ''' Open and process training data '''
    En, Cl, A, YnormXind = readLearnFile(learnFile)
    En_temp = En
    Cl_temp = Cl
    A_temp = A
    with open(summary_filename, "a") as sum_file:
        sum_file.write(str(datetime.now().strftime('Training started: %Y-%m-%d %H:%M:%S\n')))
        if preprocDef.scrambleNoiseFlag == True:
            sum_file.write(' Using Noise scrambler (offset: ' + str(preprocDef.scrambleNoiseOffset) + ')\n\n')
        sum_file.write('\nIteration\tAccuracy %\t Prediction\t Probability %\n')

    # Pick one random training spectrum as the evaluation sample
    index = random.randint(0,A.shape[0]-1)
    R = A[index,:]

    # Without noise scrambling the training set only needs preprocessing once
    if preprocDef.scrambleNoiseFlag == False:
        A_temp, Cl_temp, En_temp, Aorig = preProcessNormLearningData(A, En, Cl, YnormXind, 0)

    ''' Plot Training Data '''
    if plotDef.createTrainingDataPlot == True:
        plotTrainData(A, En, R.reshape(1,-1), plotDef.plotAllSpectra, learnFileRoot)

    for i in range(numRuns):
        print(' Running tensorflow training iteration: ' + str(i+1) + '\n')
        ''' Preprocess prediction data '''
        if preprocDef.scrambleNoiseFlag == True:
            # With scrambling, re-preprocess the (noisy) training set each run
            A_temp, Cl_temp, En_temp, Aorig = preProcessNormLearningData(A, En, Cl, YnormXind, 0)
        R_temp, Rorig = preProcessNormPredData(R, En, A_temp, En_temp, Cl_temp, YnormXind, 0)
        print(' Using random spectra from training dataset as evaluation file ')
        tfPred, tfProb, tfAccur = runTensorFlow(A_temp,Cl_temp,R_temp,learnFileRoot)
        with open(summary_filename, "a") as sum_file:
            sum_file.write(str(i+1) + '\t{:10.2f}\t'.format(tfAccur) + str(tfPred) + '\t{:10.2f}\n'.format(tfProb))
        print(' Nominal class for prediction spectra:', str(index+1), '\n')

    with open(summary_filename, "a") as sum_file:
        sum_file.write(str(datetime.now().strftime('\nTraining ended: %Y-%m-%d %H:%M:%S\n')))
    print(' Completed ' + str(numRuns) + ' Training iterations. \n')
#********************************************************************************
''' Format vectors of unique labels '''
#********************************************************************************
def formatClass(rootFile, Cl):
    """Binarize the class labels Cl (one-hot) and save a class-distribution
    histogram next to rootFile.

    :param rootFile: base path for the saved histogram PNG
    :param Cl: class labels
    :return: one-hot encoded labels (LabelBinarizer output)
    """
    import sklearn.preprocessing as pp
    print('==========================================================================\n')
    print(' Running basic TensorFlow. Creating class data in binary form...')
    Cl2 = pp.LabelBinarizer().fit_transform(Cl)

    import matplotlib.pyplot as plt
    # Histogram of how many samples fall into each class
    plt.hist([float(x) for x in Cl], bins=np.unique([float(x) for x in Cl]), edgecolor="black")
    plt.xlabel('Class')
    plt.ylabel('Occurrances')
    plt.title('Class distibution')
    plt.savefig(rootFile + '_ClassDistrib.png', dpi = 160, format = 'png')  # Save plot
    if tfDef.plotClassDistribTF == True:
        print(' Plotting Class distibution \n')
        plt.show()
    return Cl2
#********************************************************************************
''' Run basic model training and evaluation via TensorFlow '''
#********************************************************************************
def runTFbasic(A, Cl, R, Root):
    """Train (or restore) a softmax-regression TensorFlow model on (A, Cl)
    and classify the sample R.

    :param A: training data matrix (rows = spectra)
    :param Cl: training class labels
    :param R: preprocessed sample of shape (1, n)
    :param Root: base path for the saved model and plots
    :return: (predicted class, its probability score, training accuracy %)
    """
    import tensorflow as tf
    tfTrainedData = Root + '.tfmodel'
    Cl2 = formatClass(Root, Cl)

    print(' Initializing TensorFlow...')
    tf.reset_default_graph()
    # x: input spectra; (W, b): softmax-regression parameters; y_: one-hot labels
    x = tf.placeholder(tf.float32, [None, A.shape[1]])
    W = tf.Variable(tf.zeros([A.shape[1], np.unique(Cl).shape[0]]))
    b = tf.Variable(tf.zeros(np.unique(Cl).shape[0]))
    y_ = tf.placeholder(tf.float32, [None, np.unique(Cl).shape[0]])

    # The raw formulation of cross-entropy can be numerically unstable
    #y = tf.nn.softmax(tf.matmul(x, W) + b)
    #cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), axis=[1]))
    # So here we use tf.nn.softmax_cross_entropy_with_logits on the raw
    # outputs of 'y', and then average across the batch.
    y = tf.matmul(x,W) + b
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))

    if tfDef.decayLearnRate == True:
        print(' Using decaying learning rate, start at:',tfDef.learnRate, '\n')
        global_step = tf.Variable(0, trainable=False)
        starter_learning_rate = tfDef.learnRate
        learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, 100000, 0.96, staircase=True)
        train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy, global_step=global_step)
    else:
        print(' Using fix learning rate:', tfDef.learnRate, '\n')
        train_step = tf.train.GradientDescentOptimizer(tfDef.learnRate).minimize(cross_entropy)

    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()

    if tfDef.enableTensorboard == True:
        writer = tf.summary.FileWriter(".", sess.graph)
        print('\n Saving graph. Accessible via tensorboard. \n')

    saver = tf.train.Saver()
    accur = 0
    try:
        # Try to reuse a previously saved model unless retraining is forced
        if tfDef.alwaysRetrain == False:
            print(' Opening TF training model from:', tfTrainedData)
            saver.restore(sess, './' + tfTrainedData)
            print('\n Model restored.\n')
        else:
            raise ValueError(' Force TF model retraining.')
    except:
        init = tf.global_variables_initializer()
        sess.run(init)
        # NOTE(review): `&` binds tighter than `==`, so this evaluates as
        # (isfile(...) & alwaysImprove) == True. Correct for boolean flags,
        # but fragile — `and` would be the clearer operator; confirm intent.
        if os.path.isfile(tfTrainedData + '.meta') & tfDef.alwaysImprove == True:
            print('\n Improving TF model...')
            saver.restore(sess, './' + tfTrainedData)
        else:
            print('\n Rebuildind TF model...')

        if tfDef.subsetCrossValid == True:
            print(' Iterating training using subset (' + str(tfDef.percentCrossValid*100) + '%), ' + str(tfDef.iterCrossValid) + ' times ...')
            for i in range(tfDef.iterCrossValid):
                # Fresh random train/cv split each iteration; accuracy is
                # measured on the held-out subset
                As, Cl2s, As_cv, Cl2s_cv = formatSubset(A, Cl2, tfDef.percentCrossValid)
                summary = sess.run(train_step, feed_dict={x: As, y_: Cl2s})
                correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
                accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
                accur = 100*accuracy.eval(feed_dict={x:As_cv, y_:Cl2s_cv})
        else:
            # Single step on the full set; accuracy measured on training data
            summary = sess.run(train_step, feed_dict={x: A, y_: Cl2})
            correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            accur = 100*accuracy.eval(feed_dict={x:A, y_:Cl2})
        save_path = saver.save(sess, tfTrainedData)
        print(' Model saved in file: %s\n' % save_path)

    print('\033[1m Accuracy: ' + str('{:.3f}'.format(accur)) + '%\n\033[0m')
    if tfDef.enableTensorboard == True:
        writer.close()

    res1 = sess.run(y, feed_dict={x: R})
    res2 = sess.run(tf.argmax(y, 1), feed_dict={x: R})
    sess.close()

    # Classes whose score exceeds the reporting threshold
    rosterPred = np.where(res1[0]>tfDef.thresholdProbabilityTFPred)[0]
    print(' ==============================')
    print(' \033[1mTF\033[0m - Probability >',str(tfDef.thresholdProbabilityTFPred),'%')
    print(' ==============================')
    print(' Prediction\tProbability [%]')
    for i in range(rosterPred.shape[0]):
        print(' ',str(np.unique(Cl)[rosterPred][i]),'\t\t',str('{:.1f}'.format(res1[0][rosterPred][i])))
    print(' ==============================\n')
    print('\033[1m Predicted value (TF): ' + str(np.unique(Cl)[res2][0]) + ' (Probability: ' + str('{:.1f}'.format(res1[0][res2][0])) + '%)\n' + '\033[0m' )
    return np.unique(Cl)[res2][0], res1[0][res2][0], accur
#************************************
''' Main initialization routine '''
#************************************
# Script entry point: exit with whatever status main() returns.
if __name__ == "__main__":
    sys.exit(main())
| gpl-3.0 |
esikachev/scenario | sahara/openstack/common/threadgroup.py | 7 | 4838 | # Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import threading
import eventlet
from eventlet import greenpool
from sahara.openstack.common import loopingcall
LOG = logging.getLogger(__name__)
def _thread_done(gt, *args, **kwargs):
    """Callback function to be passed to GreenThread.link() when we spawn()

    Calls the :class:`ThreadGroup` to notify if.
    """
    group = kwargs['group']
    group.thread_done(kwargs['thread'])


class Thread(object):
    """Wrapper around a greenthread, that holds a reference to the
    :class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when
    it has done so it can be removed from the threads list.
    """
    def __init__(self, thread, group):
        self.thread = thread
        # Register the done-callback so the group drops this wrapper on exit
        self.thread.link(_thread_done, group=group, thread=self)

    def stop(self):
        """Kill the underlying greenthread."""
        self.thread.kill()

    def wait(self):
        """Block until the greenthread finishes and return its result."""
        return self.thread.wait()

    def link(self, func, *args, **kwargs):
        """Attach an additional completion callback to the greenthread."""
        self.thread.link(func, *args, **kwargs)
class ThreadGroup(object):
    """The point of the ThreadGroup class is to:

    * keep track of timers and greenthreads (making it easier to stop them
      when need be).
    * provide an easy API to add timers.
    """
    def __init__(self, thread_pool_size=10):
        # Pool limits how many greenthreads may run concurrently
        self.pool = greenpool.GreenPool(thread_pool_size)
        self.threads = []
        self.timers = []

    def add_dynamic_timer(self, callback, initial_delay=None,
                          periodic_interval_max=None, *args, **kwargs):
        # Timer whose next interval is chosen by the callback itself
        timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs)
        timer.start(initial_delay=initial_delay,
                    periodic_interval_max=periodic_interval_max)
        self.timers.append(timer)

    def add_timer(self, interval, callback, initial_delay=None,
                  *args, **kwargs):
        # Fixed-interval periodic timer
        pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs)
        pulse.start(interval=interval,
                    initial_delay=initial_delay)
        self.timers.append(pulse)

    def add_thread(self, callback, *args, **kwargs):
        # Spawn a greenthread in the pool and track its wrapper
        gt = self.pool.spawn(callback, *args, **kwargs)
        th = Thread(gt, self)
        self.threads.append(th)
        return th

    def thread_done(self, thread):
        # Invoked via the Thread wrapper's link callback on completion
        self.threads.remove(thread)

    def _stop_threads(self):
        current = threading.current_thread()
        # NOTE(review): `x` is a Thread wrapper while `current` is a
        # threading.Thread, so this identity check appears never to match;
        # confirm upstream intent.
        # Iterate over a copy of self.threads so thread_done doesn't
        # modify the list while we're iterating
        for x in self.threads[:]:
            if x is current:
                # don't kill the current thread.
                continue
            try:
                x.stop()
            except eventlet.greenlet.GreenletExit:
                pass
            except Exception as ex:
                LOG.exception(ex)

    def stop_timers(self):
        # Stop every timer, then forget them all
        for x in self.timers:
            try:
                x.stop()
            except Exception as ex:
                LOG.exception(ex)
        self.timers = []

    def stop(self, graceful=False):
        """stop function has the option of graceful=True/False.

        * In case of graceful=True, wait for all threads to be finished.
          Never kill threads.
        * In case of graceful=False, kill threads immediately.
        """
        self.stop_timers()
        if graceful:
            # In case of graceful=True, wait for all threads to be
            # finished, never kill threads
            self.wait()
        else:
            # In case of graceful=False(Default), kill threads
            # immediately
            self._stop_threads()

    def wait(self):
        # Wait for timers first, then for the remaining greenthreads
        for x in self.timers:
            try:
                x.wait()
            except eventlet.greenlet.GreenletExit:
                pass
            except Exception as ex:
                LOG.exception(ex)
        current = threading.current_thread()
        # Iterate over a copy of self.threads so thread_done doesn't
        # modify the list while we're iterating
        for x in self.threads[:]:
            if x is current:
                continue
            try:
                x.wait()
            except eventlet.greenlet.GreenletExit:
                pass
            except Exception as ex:
                LOG.exception(ex)
| apache-2.0 |
neuralyzer/kerasvis | kerasvis/server/main.py | 1 | 2628 | import time
import os
from flask import Flask, render_template, redirect
from .plots import empty_plot, loss_accuracy_plot
from .dataloader import LogDataLoader, to_dict
from ..callback import DBLogger
# Flask application serving the KerasVis dashboard.
app = Flask(__name__)
# Logs live in ~/tmp/keras_logs.db (sqlite, SQLAlchemy-style URL).
db_folder = os.path.join(os.environ["HOME"], "tmp")
app.config["keras_log_db_path"] = "sqlite:///" + os.path.join(db_folder, "keras_logs.db")
try:
    # Any value other than the literal string "False" enables deletion.
    app.config["allow_delete"] = os.environ["KERASVIS_ALLOW_DELETE"] != "False"
except KeyError:
    # Environment variable unset: deletion allowed by default.
    app.config["allow_delete"] = True
print(app.config["allow_delete"], type(app.config["allow_delete"]))
print("DB is", app.config["keras_log_db_path"])
def db_or_redirect_to_nodb():
    """Return a LogDataLoader for the configured database, or a redirect
    response to /nodb when the database cannot be opened.

    Fix: the redirect response was created but never returned, so callers
    received None (and then crashed) whenever the database was missing.
    """
    try:
        return LogDataLoader(path=app.config["keras_log_db_path"])
    except ValueError:
        return redirect("/nodb")
@app.route("/")
def main():
db = db_or_redirect_to_nodb()
return render_template("overview.html", data=zip(*db.get_overview()))
@app.route("/nodb")
def nodb():
return render_template("nodb.html")
@app.route("/id/<int:id>")
def detail(id):
start_time = time.time()
db = db_or_redirect_to_nodb()
if not db.id_exists(id):
return render_template("idnotfound.html", id=id)
comment = db.get_comment(id)
df = db.get_data(id)
last_update_time = db.get_last_update_time(id)
config_string = db.get_config(id)
config_dict = to_dict(config_string)
layers = config_dict["layers"] if "layers" in config_dict else config_dict["config"]["layers"] if "layers" in config_dict["config"] else config_dict["config"]
duration = time.time() - start_time
general = {key: value for key, value in config_dict.items() if key != "layers"}
if "optimizer" not in general:
general["optimizer"] = {"name": "not found"}
if db.id_exists(id) and len(df) > 0:
loss_plot = loss_accuracy_plot(df, "epoch", [["loss", "val_loss"], ["acc", "val_acc"]])
else:
loss_plot = empty_plot
return render_template("detail.html",
loss=loss_plot,
comment=comment,
id=id,
config_data=config_string,
layers=layers,
general=general,
last_update_time=last_update_time,
runs=zip(*db.get_overview()[:2]),
db_load_time=str(round(duration, 2)) + " s")
@app.route("/remove/<int:id>")
def remove(id):
if app.config["allow_delete"]:
DBLogger(db_folder=db_folder, id=id).delete(no_confirm=True)
return redirect("/")
app.debug = True
| gpl-3.0 |
bloodearnest/talisker | tests/test_django.py | 2 | 2070 | #
# Copyright (c) 2015-2018 Canonical, Ltd.
#
# This file is part of Talisker
# (see http://github.com/canonical-ols/talisker).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import * # noqa
import pytest
try:
import django # noqa
except ImportError:
pytest.skip("skipping django only tests", allow_module_level=True)
import talisker.django
import talisker.sentry
from talisker.testing import TEST_SENTRY_DSN
@pytest.mark.skipif(not talisker.sentry.enabled, reason='need raven installed')
def test_django_sentry_client(monkeypatch, context):
    """SentryClient must configure raven without installing the SQL hook,
    and become the process-wide sentry client/log handler."""
    from talisker.sentry import DummySentryTransport
    called = [False]

    def hook():
        # Records whether raven's install_sql_hook was invoked.
        called[0] = True

    monkeypatch.setattr('raven.contrib.django.client.install_sql_hook', hook)
    client = talisker.django.SentryClient(
        dsn=TEST_SENTRY_DSN,
        transport=DummySentryTransport,
        install_sql_hook=True,
    )
    # Even with install_sql_hook=True, the subclass must not install it.
    assert called[0] is False
    assert set(client.processors) == talisker.sentry.default_processors
    context.assert_log(msg='configured raven')
    # The new client is registered globally for both client and log handler.
    assert talisker.sentry.get_client() is client
    assert talisker.sentry.get_log_handler().client is client
| gpl-3.0 |
arnif/CouchPotatoServer | libs/pyutil/hashexpand.py | 106 | 2890 | # Copyright (c) 2002-2012 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
import warnings
"""
Cryptographically strong pseudo-random number generator based on SHA256.
"""
class SHA256Expander:
    """
    Provide a cryptographically strong pseudo-random number generator based on
    SHA256. Hopefully this means that no attacker will be able to predict any
    bit of output that he hasn't seen, given that he doesn't know anything about
    the seed and given that he can see as many bits of output as he desires
    except for the bit that he is trying to predict. Further it is hoped that
    an attacker will not even be able to determine whether a given stream of
    random bytes was generated by this PRNG or by flipping a coin repeatedly.
    The safety of this technique has not been verified by a Real Cryptographer.
    ... but it is similar to the PRNG in FIPS-186...

    The seed and counter are encoded in DJB's netstring format so that I
    don't have to think about the possibility of ambiguity.

    Note: I've since learned more about the theory of secure hash functions
    and the above is a strong assumption about a secure hash function. Use
    of this class should be considered deprecated and you should use a more
    well-analyzed KDF (such as the nascent standard HKDF) or stream cipher or
    whatever it is that you need.
    """
    def __init__(self, seed=None):
        # The whole class is deprecated (see class docstring).
        warnings.warn("deprecated", DeprecationWarning)
        if seed is not None:
            self.seed(seed)

    def seed(self, seed):
        """Initialize the hash state from `seed` (netstring-framed)."""
        import hashlib
        # NOTE(review): feeding str objects to sha256()/update() only works
        # on Python 2, where str is bytes; Python 3 would require bytes here.
        self.starth = hashlib.sha256('24:pyutil hash expansion v2,10:algorithm:,6:SHA256,6:value:,')
        seedlen = len(seed)
        seedlenstr = str(seedlen)
        self.starth.update(seedlenstr)
        self.starth.update(':')
        self.starth.update(seed)
        self.starth.update(',')
        # avail: unconsumed bytes of the last digest; counter: block index.
        self.avail = ""
        self.counter = 0

    def get(self, bytes):
        """Return `bytes` pseudo-random bytes by hashing seed+counter blocks."""
        bytesleft = bytes
        res = []
        while bytesleft > 0:
            if len(self.avail) == 0:
                # Derive the next 32-byte block from a copy of the seeded
                # state plus the netstring-framed counter.
                h = self.starth.copy()
                counterstr = str(self.counter)
                counterstrlen = len(counterstr)
                counterstrlenstr = str(counterstrlen)
                h.update(counterstrlenstr)
                h.update(':')
                h.update(counterstr)
                h.update(',')
                self.avail = h.digest()
                self.counter += 1
            numb = min(len(self.avail), bytesleft)
            (chunk, self.avail,) = (self.avail[:numb], self.avail[numb:],)
            res.append(chunk)
            bytesleft = bytesleft - numb
        resstr = ''.join(res)
        assert len(resstr) == bytes
        return resstr
def sha256expand(inpstr, expbytes):
    """Expand `inpstr` into `expbytes` pseudo-random bytes (see SHA256Expander)."""
    expander = SHA256Expander(inpstr)
    return expander.get(expbytes)
| gpl-3.0 |
cleverhans-lab/cleverhans | cleverhans_v3.1.0/cleverhans/attacks/sparse_l1_descent.py | 1 | 14083 | """
The SparseL1Descent attack.
"""
import warnings
from distutils.version import LooseVersion
import tensorflow as tf
from cleverhans.attacks.attack import Attack
from cleverhans import utils_tf
from cleverhans.utils_tf import clip_eta, random_lp_vector
from cleverhans.compat import reduce_max, reduce_sum, softmax_cross_entropy_with_logits
class SparseL1Descent(Attack):
"""
This class implements a variant of Projected Gradient Descent for the l1-norm
(Tramer and Boneh 2019). The l1-norm case is more tricky than the l-inf and l2
cases covered by the ProjectedGradientDescent class, because the steepest
descent direction for the l1-norm is too sparse (it updates a single
coordinate in the adversarial perturbation in each step). This attack has an
additional parameter that controls the sparsity of the update step. For
moderately sparse update steps, the attack vastly outperforms Projected
Steepest Descent and is competitive with other attacks targeted at the l1-norm
such as the ElasticNetMethod attack (which is much more computationally
expensive).
Paper link (Tramer and Boneh 2019): https://arxiv.org/pdf/1904.13000.pdf
:param model: cleverhans.model.Model
:param sess: optional tf.Session
:param dtypestr: dtype of the data
:param kwargs: passed through to super constructor
"""
    def __init__(self, model, sess=None, dtypestr="float32", **kwargs):
        """
        Create a SparseL1Descent instance.

        Note: the model parameter should be an instance of the
        cleverhans.model.Model abstraction provided by CleverHans.
        """
        super(SparseL1Descent, self).__init__(
            model, sess=sess, dtypestr=dtypestr, **kwargs
        )
        # Parameters that may be supplied as feedable tensors at attack time.
        self.feedable_kwargs = (
            "eps",
            "eps_iter",
            "y",
            "y_target",
            "clip_min",
            "clip_max",
            "grad_sparsity",
        )
        # Parameters that change the graph structure (require a rebuild).
        self.structural_kwargs = ["nb_iter", "rand_init", "clip_grad", "sanity_checks"]
    def generate(self, x, **kwargs):
        """
        Generate symbolic graph for adversarial examples and return.

        :param x: The model's symbolic inputs.
        :param kwargs: See `parse_params`
        """
        # Parse and save attack-specific parameters
        assert self.parse_params(**kwargs)

        asserts = []

        # If a data range was specified, check that the input was in that range
        if self.clip_min is not None:
            asserts.append(
                utils_tf.assert_greater_equal(x, tf.cast(self.clip_min, x.dtype))
            )

        if self.clip_max is not None:
            asserts.append(
                utils_tf.assert_less_equal(x, tf.cast(self.clip_max, x.dtype))
            )

        # Initialize loop variables
        if self.rand_init:
            # Random start inside the l1 ball of radius eps
            eta = random_lp_vector(
                tf.shape(x), ord=1, eps=tf.cast(self.eps, x.dtype), dtype=x.dtype
            )
        else:
            eta = tf.zeros(tf.shape(x))

        # Clip eta
        eta = clip_eta(eta, ord=1, eps=self.eps)
        adv_x = x + eta
        if self.clip_min is not None or self.clip_max is not None:
            adv_x = utils_tf.clip_by_value(adv_x, self.clip_min, self.clip_max)

        if self.y_target is not None:
            y = self.y_target
            targeted = True
        elif self.y is not None:
            y = self.y
            targeted = False
        else:
            # No labels given: use the model's most likely prediction as the
            # (untargeted) label, with gradients stopped through it.
            model_preds = self.model.get_probs(x)
            preds_max = reduce_max(model_preds, 1, keepdims=True)
            y = tf.to_float(tf.equal(model_preds, preds_max))
            y = tf.stop_gradient(y)
            targeted = False
            del model_preds

        y_kwarg = "y_target" if targeted else "y"

        def cond(i, _):
            """Iterate until requested number of iterations is completed"""
            return tf.less(i, self.nb_iter)

        def body(i, adv_x):
            """Do a projected gradient step"""
            labels, _ = self.get_or_guess_labels(adv_x, {y_kwarg: y})
            logits = self.model.get_logits(adv_x)
            # One sparse l1 steepest-descent step of size eps_iter
            adv_x = sparse_l1_descent(
                adv_x,
                logits,
                y=labels,
                eps=self.eps_iter,
                q=self.grad_sparsity,
                clip_min=self.clip_min,
                clip_max=self.clip_max,
                clip_grad=self.clip_grad,
                targeted=(self.y_target is not None),
                sanity_checks=self.sanity_checks,
            )

            # Clipping perturbation eta to the l1-ball
            eta = adv_x - x
            eta = clip_eta(eta, ord=1, eps=self.eps)
            adv_x = x + eta

            # Redo the clipping.
            # Subtracting and re-adding eta can add some small numerical error.
            if self.clip_min is not None or self.clip_max is not None:
                adv_x = utils_tf.clip_by_value(adv_x, self.clip_min, self.clip_max)

            return i + 1, adv_x

        _, adv_x = tf.while_loop(
            cond,
            body,
            (tf.zeros([]), adv_x),
            back_prop=True,
            maximum_iterations=self.nb_iter,
        )

        # Asserts run only on CPU.
        # When multi-GPU eval code tries to force all PGD ops onto GPU, this
        # can cause an error.
        common_dtype = tf.float32
        asserts.append(
            utils_tf.assert_less_equal(
                tf.cast(self.eps_iter, dtype=common_dtype),
                tf.cast(self.eps, dtype=common_dtype),
            )
        )

        if self.sanity_checks:
            with tf.control_dependencies(asserts):
                adv_x = tf.identity(adv_x)

        return adv_x
def parse_params(
    self,
    eps=10.0,
    eps_iter=1.0,
    nb_iter=20,
    y=None,
    clip_min=None,
    clip_max=None,
    y_target=None,
    rand_init=False,
    clip_grad=False,
    grad_sparsity=99,
    sanity_checks=True,
    **kwargs
):
    """
    Take in a dictionary of parameters and applies attack-specific checks
    before saving them as attributes.

    Attack-specific parameters:

    :param eps: (optional float) maximum distortion of adversarial example
                compared to original input
    :param eps_iter: (optional float) step size for each attack iteration
    :param nb_iter: (optional int) Number of attack iterations.
    :param y: (optional) A tensor with the true labels.
    :param y_target: (optional) A tensor with the labels to target. Leave
                     y_target=None if y is also set. Labels should be
                     one-hot-encoded.
    :param clip_min: (optional float) Minimum input component value
    :param clip_max: (optional float) Maximum input component value
    :param rand_init: (optional bool) If True, start the descent from a
                      random point inside the l1-ball of radius eps around
                      the input rather than from the input itself.
    :param clip_grad: (optional bool) Ignore gradient components
                      at positions where the input is already at the boundary
                      of the domain, and the update step will get clipped out.
    :param grad_sparsity: (optional) Relative sparsity of the gradient update
                          step, in percent. Only gradient values larger
                          than this percentile are retained. This parameter can
                          be a scalar, or a vector of the same length as the
                          input batch dimension.
    :param sanity_checks: bool Insert tf asserts checking values
        (Some tests need to run with no sanity checks because the
        tests intentionally configure the attack strangely)
    :return: True (asserted by callers such as `generate`).
    :raises ValueError: if both y and y_target are given, if clip_grad is
        set without clip_min/clip_max, or if grad_sparsity is out of range
        or has more than one dimension.
    """
    # Save attack-specific parameters
    self.eps = eps
    self.rand_init = rand_init
    self.eps_iter = eps_iter
    self.nb_iter = nb_iter
    self.y = y
    self.y_target = y_target
    self.clip_min = clip_min
    self.clip_max = clip_max
    self.clip_grad = clip_grad
    self.grad_sparsity = grad_sparsity

    if isinstance(eps, float) and isinstance(eps_iter, float):
        # If these are both known at compile time, we can check before anything
        # is run. If they are tf, we can't check them yet.
        assert eps_iter <= eps, (eps_iter, eps)

    if self.y is not None and self.y_target is not None:
        raise ValueError("Must not set both y and y_target")

    if self.clip_grad and (self.clip_min is None or self.clip_max is None):
        raise ValueError("Must set clip_min and clip_max if clip_grad is set")

    # The grad_sparsity argument governs the sparsity of the gradient
    # update. It indicates the percentile value above which gradient entries
    # are retained. It can be specified as a scalar or as a 1-dimensional
    # vector of the same size as the input's batch dimension.
    if isinstance(self.grad_sparsity, (int, float)):
        if not 0 < self.grad_sparsity < 100:
            raise ValueError("grad_sparsity should be in (0, 100)")
    else:
        self.grad_sparsity = tf.convert_to_tensor(self.grad_sparsity)
        if len(self.grad_sparsity.shape) > 1:
            raise ValueError("grad_sparsity should either be a scalar or a vector")

    self.sanity_checks = sanity_checks

    if kwargs:
        # Idiomatic emptiness test (was: len(kwargs.keys()) > 0).
        warnings.warn(
            "kwargs is unused and will be removed on or after " "2019-04-26."
        )

    return True
def sparse_l1_descent(
    x,
    logits,
    y=None,
    eps=1.0,
    q=99,
    clip_min=None,
    clip_max=None,
    clip_grad=False,
    targeted=False,
    sanity_checks=True,
):
    """
    TensorFlow implementation of one step of the Sparse L1 Descent Method.
    (Docstring previously said "Dense", apparently a copy-paste slip: this
    function keeps only the top-(100-q)% gradient entries.)

    :param x: the input placeholder
    :param logits: output of model.get_logits
    :param y: (optional) A placeholder for the true labels. If targeted
              is true, then provide the target label. Otherwise, only provide
              this parameter if you'd like to use true labels when crafting
              adversarial samples. Otherwise, model predictions are used as
              labels to avoid the "label leaking" effect (explained in this
              paper: https://arxiv.org/abs/1611.01236). Default is None.
              Labels should be one-hot-encoded.
    :param eps: the epsilon (input variation parameter)
    :param q: the percentile above which gradient values are retained. Either a
              scalar or a vector of same length as the input batch dimension.
    :param clip_min: Minimum float value for adversarial example components
    :param clip_max: Maximum float value for adversarial example components
    :param clip_grad: (optional bool) Ignore gradient components
                      at positions where the input is already at the boundary
                      of the domain, and the update step will get clipped out.
    :param targeted: Is the attack targeted or untargeted? Untargeted, the
                     default, will try to make the label incorrect. Targeted
                     will instead try to move in the direction of being more
                     like y.
    :param sanity_checks: bool; if True, wire the collected tf asserts into
                          the graph as control dependencies of the output.
    :return: a tensor for the adversarial example
    """
    asserts = []

    # If a data range was specified, check that the input was in that range
    if clip_min is not None:
        asserts.append(utils_tf.assert_greater_equal(x, tf.cast(clip_min, x.dtype)))
    if clip_max is not None:
        asserts.append(utils_tf.assert_less_equal(x, tf.cast(clip_max, x.dtype)))

    # Make sure the caller has not passed probs by accident
    assert logits.op.type != "Softmax"

    if y is None:
        # Using model predictions as ground truth to avoid label leaking
        preds_max = reduce_max(logits, 1, keepdims=True)
        y = tf.to_float(tf.equal(logits, preds_max))
        y = tf.stop_gradient(y)
    # Normalize so that ties in the argmax share probability mass.
    y = y / reduce_sum(y, 1, keepdims=True)

    # Compute loss
    loss = softmax_cross_entropy_with_logits(labels=y, logits=logits)
    if targeted:
        # For a targeted attack we *minimize* the loss w.r.t. the target.
        loss = -loss

    # Define gradient of loss wrt input
    (grad,) = tf.gradients(loss, x)

    if clip_grad:
        grad = utils_tf.zero_out_clipped_grads(grad, x, clip_min, clip_max)

    # Reduction indices covering everything but the batch dimension.
    red_ind = list(range(1, len(grad.get_shape())))
    dim = tf.reduce_prod(tf.shape(x)[1:])

    abs_grad = tf.reshape(tf.abs(grad), (-1, dim))

    # if q is a scalar, broadcast it to a vector of same length as the batch dim
    q = tf.cast(tf.broadcast_to(q, tf.shape(x)[0:1]), tf.float32)
    k = tf.cast(tf.floor(q / 100 * tf.cast(dim, tf.float32)), tf.int32)

    # `tf.sort` is much faster than `tf.contrib.distributions.percentile`.
    # For TF <= 1.12, use `tf.nn.top_k` as `tf.sort` is not implemented.
    if LooseVersion(tf.__version__) <= LooseVersion("1.12.0"):
        # `tf.sort` is only available in TF 1.13 onwards
        sorted_grad = -tf.nn.top_k(-abs_grad, k=dim, sorted=True)[0]
    else:
        sorted_grad = tf.sort(abs_grad, axis=-1)

    # Per-example q-th percentile of |grad|; entries >= it are kept.
    idx = tf.stack((tf.range(tf.shape(abs_grad)[0]), k), -1)
    percentiles = tf.gather_nd(sorted_grad, idx)
    tied_for_max = tf.greater_equal(abs_grad, tf.expand_dims(percentiles, -1))
    tied_for_max = tf.reshape(tf.cast(tied_for_max, x.dtype), tf.shape(grad))
    # Split the unit l1 budget evenly across the retained coordinates.
    num_ties = tf.reduce_sum(tied_for_max, red_ind, keepdims=True)

    optimal_perturbation = tf.sign(grad) * tied_for_max / num_ties

    # Add perturbation to original example to obtain adversarial example
    adv_x = x + utils_tf.mul(eps, optimal_perturbation)

    # If clipping is needed, reset all values outside of [clip_min, clip_max]
    if (clip_min is not None) or (clip_max is not None):
        # We don't currently support one-sided clipping
        assert clip_min is not None and clip_max is not None
        adv_x = utils_tf.clip_by_value(adv_x, clip_min, clip_max)

    if sanity_checks:
        with tf.control_dependencies(asserts):
            adv_x = tf.identity(adv_x)

    return adv_x
| mit |
BrotherPhil/django | django/core/files/locks.py | 725 | 3516 | """
Portable file locking utilities.
Based partially on an example by Jonathan Feignberg in the Python
Cookbook [1] (licensed under the Python Software License) and a ctypes port by
Anatoly Techtonik for Roundup [2] (license [3]).
[1] http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65203
[2] http://sourceforge.net/p/roundup/code/ci/default/tree/roundup/backends/portalocker.py
[3] http://sourceforge.net/p/roundup/code/ci/default/tree/COPYING.txt
Example Usage::
>>> from django.core.files import locks
>>> with open('./file', 'wb') as f:
... locks.lock(f, locks.LOCK_EX)
... f.write('Django')
"""
import os
__all__ = ('LOCK_EX', 'LOCK_SH', 'LOCK_NB', 'lock', 'unlock')
def _fd(f):
"""Get a filedescriptor from something which could be a file or an fd."""
return f.fileno() if hasattr(f, 'fileno') else f
# Pick the locking implementation for the current platform:
# Windows uses LockFileEx/UnlockFileEx via ctypes; POSIX uses fcntl.flock;
# platforms with neither fall back to no-op dummies.
if os.name == 'nt':
    import msvcrt
    from ctypes import (sizeof, c_ulong, c_void_p, c_int64,
                        Structure, Union, POINTER, windll, byref)
    from ctypes.wintypes import BOOL, DWORD, HANDLE

    LOCK_SH = 0  # the default
    LOCK_NB = 0x1  # LOCKFILE_FAIL_IMMEDIATELY
    LOCK_EX = 0x2  # LOCKFILE_EXCLUSIVE_LOCK

    # --- Adapted from the pyserial project ---
    # detect size of ULONG_PTR
    if sizeof(c_ulong) != sizeof(c_void_p):
        ULONG_PTR = c_int64
    else:
        ULONG_PTR = c_ulong
    PVOID = c_void_p

    # --- Union inside Structure by stackoverflow:3480240 ---
    class _OFFSET(Structure):
        _fields_ = [
            ('Offset', DWORD),
            ('OffsetHigh', DWORD)]

    class _OFFSET_UNION(Union):
        _anonymous_ = ['_offset']
        _fields_ = [
            ('_offset', _OFFSET),
            ('Pointer', PVOID)]

    class OVERLAPPED(Structure):
        _anonymous_ = ['_offset_union']
        _fields_ = [
            ('Internal', ULONG_PTR),
            ('InternalHigh', ULONG_PTR),
            ('_offset_union', _OFFSET_UNION),
            ('hEvent', HANDLE)]

    LPOVERLAPPED = POINTER(OVERLAPPED)

    # --- Define function prototypes for extra safety ---
    LockFileEx = windll.kernel32.LockFileEx
    LockFileEx.restype = BOOL
    LockFileEx.argtypes = [HANDLE, DWORD, DWORD, DWORD, DWORD, LPOVERLAPPED]
    UnlockFileEx = windll.kernel32.UnlockFileEx
    UnlockFileEx.restype = BOOL
    UnlockFileEx.argtypes = [HANDLE, DWORD, DWORD, DWORD, LPOVERLAPPED]

    def lock(f, flags):
        """Lock the first 0xFFFF0000 bytes of f; return True on success."""
        hfile = msvcrt.get_osfhandle(_fd(f))
        overlapped = OVERLAPPED()
        ret = LockFileEx(hfile, flags, 0, 0, 0xFFFF0000, byref(overlapped))
        return bool(ret)

    def unlock(f):
        """Release the lock taken by lock(); return True on success."""
        hfile = msvcrt.get_osfhandle(_fd(f))
        overlapped = OVERLAPPED()
        ret = UnlockFileEx(hfile, 0, 0, 0xFFFF0000, byref(overlapped))
        return bool(ret)
else:
    try:
        import fcntl
        LOCK_SH = fcntl.LOCK_SH  # shared lock
        LOCK_NB = fcntl.LOCK_NB  # non-blocking
        LOCK_EX = fcntl.LOCK_EX
    except (ImportError, AttributeError):
        # File locking is not supported.
        LOCK_EX = LOCK_SH = LOCK_NB = 0

        # Dummy functions that don't do anything.
        def lock(f, flags):
            # File is not locked
            return False

        def unlock(f):
            # File is unlocked
            return True
    else:
        def lock(f, flags):
            """Lock f via flock(2); flock returns None so this yields True."""
            ret = fcntl.flock(_fd(f), flags)
            return (ret == 0)

        def unlock(f):
            """Unlock f via flock(2) with LOCK_UN."""
            ret = fcntl.flock(_fd(f), fcntl.LOCK_UN)
            return (ret == 0)
| bsd-3-clause |
denys-duchier/django | tests/shortcuts/tests.py | 109 | 1737 | from django.test import SimpleTestCase, override_settings
from django.test.utils import require_jinja2
@override_settings(ROOT_URLCONF='shortcuts.urls')
class RenderTests(SimpleTestCase):
    """Tests for django.shortcuts.render(), exercised through the views
    wired up in shortcuts/urls.py."""

    def test_render(self):
        """render() returns 200 text/html and does not set request.current_app."""
        response = self.client.get('/render/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'FOO.BAR../render/\n')
        self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
        self.assertFalse(hasattr(response.context.request, 'current_app'))

    def test_render_with_multiple_templates(self):
        """A list of template names selects the first template that exists."""
        response = self.client.get('/render/multiple_templates/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'FOO.BAR../render/multiple_templates/\n')

    def test_render_with_content_type(self):
        """content_type= overrides the default text/html Content-Type."""
        response = self.client.get('/render/content_type/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'FOO.BAR../render/content_type/\n')
        self.assertEqual(response['Content-Type'], 'application/x-rendertest')

    def test_render_with_status(self):
        """status= sets the HTTP status code of the rendered response."""
        response = self.client.get('/render/status/')
        self.assertEqual(response.status_code, 403)
        self.assertEqual(response.content, b'FOO.BAR../render/status/\n')

    @require_jinja2
    def test_render_with_using(self):
        """using= selects the template engine; the default engine is DTL."""
        response = self.client.get('/render/using/')
        self.assertEqual(response.content, b'DTL\n')
        response = self.client.get('/render/using/?using=django')
        self.assertEqual(response.content, b'DTL\n')
        response = self.client.get('/render/using/?using=jinja2')
        self.assertEqual(response.content, b'Jinja2\n')
| bsd-3-clause |
RydrDojo/Ridr | pylotVenv/lib/python2.7/site-packages/wheel/test/test_wheelfile.py | 327 | 4585 | import os
import wheel.install
import wheel.archive
import hashlib
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
import codecs
import zipfile
import pytest
import shutil
import tempfile
from contextlib import contextmanager
@contextmanager
def environ(key, value):
    """Temporarily set os.environ[key] to value, restoring (or removing)
    the variable on exit."""
    saved = os.environ.get(key)
    try:
        os.environ[key] = value
        yield
    finally:
        if saved is not None:
            os.environ[key] = saved
        else:
            del os.environ[key]
@contextmanager
def temporary_directory():
    """Create a throwaway directory and remove it (with its contents) on exit.

    tempfile.TemporaryDirectory doesn't exist in Python 2, hence this shim.
    """
    path = tempfile.mkdtemp()
    try:
        yield path
    finally:
        shutil.rmtree(path)
@contextmanager
def readable_zipfile(path):
    """Open *path* as a read-only ZipFile and close it on exit.

    zipfile.ZipFile() isn't a context manager under Python 2.
    """
    archive = zipfile.ZipFile(path, 'r')
    try:
        yield archive
    finally:
        archive.close()
def test_verifying_zipfile():
    """VerifyingZipFile rejects members whose content hash doesn't match the
    expected hash, and in strict mode rejects members with no expected hash."""
    if not hasattr(zipfile.ZipExtFile, '_update_crc'):
        pytest.skip('No ZIP verification. Missing ZipExtFile._update_crc.')
    # Build an in-memory zip with three members.
    sio = StringIO()
    zf = zipfile.ZipFile(sio, 'w')
    zf.writestr("one", b"first file")
    zf.writestr("two", b"second file")
    zf.writestr("three", b"third file")
    zf.close()

    # In default mode, VerifyingZipFile checks the hash of any read file
    # mentioned with set_expected_hash(). Files not mentioned with
    # set_expected_hash() are not checked.
    vzf = wheel.install.VerifyingZipFile(sio, 'r')
    vzf.set_expected_hash("one", hashlib.sha256(b"first file").digest())
    vzf.set_expected_hash("three", "blurble")  # deliberately wrong hash
    vzf.open("one").read()
    vzf.open("two").read()
    try:
        vzf.open("three").read()
    except wheel.install.BadWheelFile:
        pass
    else:
        raise Exception("expected exception 'BadWheelFile()'")

    # In strict mode, VerifyingZipFile requires every read file to be
    # mentioned with set_expected_hash().
    vzf.strict = True
    try:
        vzf.open("two").read()
    except wheel.install.BadWheelFile:
        pass
    else:
        raise Exception("expected exception 'BadWheelFile()'")

    # An explicit None hash opts a member out of checking even in strict mode.
    vzf.set_expected_hash("two", None)
    vzf.open("two").read()
def test_pop_zipfile():
    """pop() removes the last member from an append-mode archive and
    raises RuntimeError on a closed archive."""
    sio = StringIO()
    zf = wheel.install.VerifyingZipFile(sio, 'w')
    zf.writestr("one", b"first file")
    zf.writestr("two", b"second file")
    zf.close()

    # pop() on a closed archive must fail.
    try:
        zf.pop()
    except RuntimeError:
        pass  # already closed
    else:
        raise Exception("expected RuntimeError")

    # Re-open for append, drop the last member, and verify only one remains.
    zf = wheel.install.VerifyingZipFile(sio, 'a')
    zf.pop()
    zf.close()

    zf = wheel.install.VerifyingZipFile(sio, 'r')
    assert len(zf.infolist()) == 1
def test_zipfile_timestamp():
    """SOURCE_DATE_EPOCH pins member timestamps for reproducible builds."""
    # An environment variable can be used to influence the timestamp on
    # TarInfo objects inside the zip. See issue #143. TemporaryDirectory is
    # not a context manager under Python 2 (hence the local helper).
    with temporary_directory() as tempdir:
        for filename in ('one', 'two', 'three'):
            path = os.path.join(tempdir, filename)
            with codecs.open(path, 'w', encoding='utf-8') as fp:
                fp.write(filename + '\n')
        zip_base_name = os.path.join(tempdir, 'dummy')
        # The earliest date representable in TarInfos, 1980-01-01
        with environ('SOURCE_DATE_EPOCH', '315576060'):
            zip_filename = wheel.archive.make_wheelfile_inner(
                zip_base_name, tempdir)
        with readable_zipfile(zip_filename) as zf:
            for info in zf.infolist():
                assert info.date_time[:3] == (1980, 1, 1)
def test_zipfile_attributes():
    """Member permission bits and compression type survive make_wheelfile_inner."""
    # With the change from ZipFile.write() to .writestr(), we need to manually
    # set member attributes.
    with temporary_directory() as tempdir:
        files = (('foo', 0o644), ('bar', 0o755))
        for filename, mode in files:
            path = os.path.join(tempdir, filename)
            with codecs.open(path, 'w', encoding='utf-8') as fp:
                fp.write(filename + '\n')
            os.chmod(path, mode)
        zip_base_name = os.path.join(tempdir, 'dummy')
        zip_filename = wheel.archive.make_wheelfile_inner(
            zip_base_name, tempdir)
        with readable_zipfile(zip_filename) as zf:
            for filename, mode in files:
                info = zf.getinfo(os.path.join(tempdir, filename))
                # external_attr packs the Unix mode (with S_IFREG) in the
                # high 16 bits of the member's attribute word.
                assert info.external_attr == (mode | 0o100000) << 16
                assert info.compress_type == zipfile.ZIP_DEFLATED
| mit |
grap/OCB | addons/crm_todo/__init__.py | 66 | 1071 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_todo
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ramitalat/odoo | addons/crm/crm_phonecall.py | 255 | 14844 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm
from datetime import datetime
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
class crm_phonecall(osv.osv):
    """ Model for CRM phonecalls """
    _name = "crm.phonecall"
    _description = "Phonecall"
    _order = "id desc"
    _inherit = ['mail.thread']
    _columns = {
        'date_action_last': fields.datetime('Last Action', readonly=1),
        'date_action_next': fields.datetime('Next Action', readonly=1),
        'create_date': fields.datetime('Creation Date' , readonly=True),
        'section_id': fields.many2one('crm.case.section', 'Sales Team', \
                        select=True, help='Sales team to which Case belongs to.'),
        'user_id': fields.many2one('res.users', 'Responsible'),
        'partner_id': fields.many2one('res.partner', 'Contact'),
        'company_id': fields.many2one('res.company', 'Company'),
        'description': fields.text('Description'),
        'state': fields.selection(
            [('open', 'Confirmed'),
             ('cancel', 'Cancelled'),
             ('pending', 'Pending'),
             ('done', 'Held')
             ], string='Status', readonly=True, track_visibility='onchange',
            help='The status is set to Confirmed, when a case is created.\n'
                 'When the call is over, the status is set to Held.\n'
                 'If the callis not applicable anymore, the status can be set to Cancelled.'),
        'email_from': fields.char('Email', size=128, help="These people will receive email."),
        'date_open': fields.datetime('Opened', readonly=True),
        # phonecall fields
        'name': fields.char('Call Summary', required=True),
        'active': fields.boolean('Active', required=False),
        'duration': fields.float('Duration', help='Duration in minutes and seconds.'),
        'categ_id': fields.many2one('crm.case.categ', 'Category', \
                        domain="['|',('section_id','=',section_id),('section_id','=',False),\
                        ('object_id.model', '=', 'crm.phonecall')]"),
        'partner_phone': fields.char('Phone'),
        'partner_mobile': fields.char('Mobile'),
        'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')], 'Priority'),
        'date_closed': fields.datetime('Closed', readonly=True),
        'date': fields.datetime('Date'),
        'opportunity_id': fields.many2one ('crm.lead', 'Lead/Opportunity'),
    }

    def _get_default_state(self, cr, uid, context=None):
        """Default state: honour context['default_state'], else 'open'."""
        if context and context.get('default_state'):
            return context.get('default_state')
        return 'open'

    _defaults = {
        'date': fields.datetime.now,
        'priority': '1',
        'state': _get_default_state,
        'user_id': lambda self, cr, uid, ctx: uid,
        'active': 1
    }

    def on_change_partner_id(self, cr, uid, ids, partner_id, context=None):
        """On-change handler: copy phone/mobile from the selected partner."""
        values = {}
        if partner_id:
            partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
            values = {
                'partner_phone': partner.phone,
                'partner_mobile': partner.mobile,
            }
        return {'value': values}

    def write(self, cr, uid, ids, values, context=None):
        """ Override to add case management: open/close dates """
        if values.get('state'):
            if values.get('state') == 'done':
                # Closing the call: stamp the close date and compute duration
                # from the scheduled date before the state is written.
                values['date_closed'] = fields.datetime.now()
                self.compute_duration(cr, uid, ids, context=context)
            elif values.get('state') == 'open':
                values['date_open'] = fields.datetime.now()
                values['duration'] = 0.0
        return super(crm_phonecall, self).write(cr, uid, ids, values, context=context)

    def compute_duration(self, cr, uid, ids, context=None):
        """Set duration (in minutes) to now - scheduled date for calls whose
        duration was not entered manually (<= 0)."""
        for phonecall in self.browse(cr, uid, ids, context=context):
            if phonecall.duration <= 0:
                duration = datetime.now() - datetime.strptime(phonecall.date, DEFAULT_SERVER_DATETIME_FORMAT)
                values = {'duration': duration.seconds/float(60)}
                self.write(cr, uid, [phonecall.id], values, context=context)
        return True

    def schedule_another_phonecall(self, cr, uid, ids, schedule_time, call_summary, \
                    user_id=False, section_id=False, categ_id=False, action='schedule', context=None):
        """
        action :('schedule','Schedule a call'), ('log','Log a call')

        Create one follow-up phonecall per call in ``ids``, copying partner
        and team data; with action='log' the new call is immediately held.
        :return dict: {original_call_id: new_call_id}
        """
        model_data = self.pool.get('ir.model.data')
        phonecall_dict = {}
        if not categ_id:
            # Default to the standard "Outbound" phonecall category if present.
            try:
                res_id = model_data._get_id(cr, uid, 'crm', 'categ_phone2')
                categ_id = model_data.browse(cr, uid, res_id, context=context).res_id
            except ValueError:
                pass
        for call in self.browse(cr, uid, ids, context=context):
            if not section_id:
                section_id = call.section_id and call.section_id.id or False
            if not user_id:
                user_id = call.user_id and call.user_id.id or False
            if not schedule_time:
                schedule_time = call.date
            vals = {
                'name' : call_summary,
                'user_id' : user_id or False,
                'categ_id' : categ_id or False,
                'description' : call.description or False,
                'date' : schedule_time,
                'section_id' : section_id or False,
                'partner_id': call.partner_id and call.partner_id.id or False,
                'partner_phone' : call.partner_phone,
                'partner_mobile' : call.partner_mobile,
                'priority': call.priority,
                'opportunity_id': call.opportunity_id and call.opportunity_id.id or False,
            }
            new_id = self.create(cr, uid, vals, context=context)
            if action == 'log':
                self.write(cr, uid, [new_id], {'state': 'done'}, context=context)
            phonecall_dict[call.id] = new_id
        return phonecall_dict

    def _call_create_partner(self, cr, uid, phonecall, context=None):
        """Create a res.partner from the phonecall data; return its id."""
        partner = self.pool.get('res.partner')
        partner_id = partner.create(cr, uid, {
            'name': phonecall.name,
            'user_id': phonecall.user_id.id,
            'comment': phonecall.description,
            'address': []
        })
        return partner_id

    def on_change_opportunity(self, cr, uid, ids, opportunity_id, context=None):
        """On-change handler: copy team/phone/partner from the linked lead."""
        values = {}
        if opportunity_id:
            opportunity = self.pool.get('crm.lead').browse(cr, uid, opportunity_id, context=context)
            values = {
                'section_id' : opportunity.section_id and opportunity.section_id.id or False,
                'partner_phone' : opportunity.phone,
                'partner_mobile' : opportunity.mobile,
                'partner_id' : opportunity.partner_id and opportunity.partner_id.id or False,
            }
        return {'value' : values}

    def _call_set_partner(self, cr, uid, ids, partner_id, context=None):
        """Assign partner_id to the calls and post a chatter note."""
        write_res = self.write(cr, uid, ids, {'partner_id' : partner_id}, context=context)
        self._call_set_partner_send_note(cr, uid, ids, context)
        return write_res

    def _call_create_partner_address(self, cr, uid, phonecall, partner_id, context=None):
        """Create a child contact (address) under partner_id; return its id."""
        address = self.pool.get('res.partner')
        return address.create(cr, uid, {
            'parent_id': partner_id,
            'name': phonecall.name,
            'phone': phonecall.partner_phone,
        })

    def handle_partner_assignation(self, cr, uid, ids, action='create', partner_id=False, context=None):
        """
        Handle partner assignation during a lead conversion.
        if action is 'create', create new partner with contact and assign lead to new partner_id.
        otherwise assign lead to specified partner_id
        :param list ids: phonecalls ids to process
        :param string action: what has to be done regarding partners (create it, assign an existing one, or nothing)
        :param int partner_id: partner to assign if any
        :return dict: dictionary organized as followed: {lead_id: partner_assigned_id}
        """
        #TODO this is a duplication of the handle_partner_assignation method of crm_lead
        partner_ids = {}
        # If a partner_id is given, force this partner for all elements
        force_partner_id = partner_id
        for call in self.browse(cr, uid, ids, context=context):
            # If the action is set to 'create' and no partner_id is set, create a new one
            if action == 'create':
                partner_id = force_partner_id or self._call_create_partner(cr, uid, call, context=context)
                self._call_create_partner_address(cr, uid, call, partner_id, context=context)
                self._call_set_partner(cr, uid, [call.id], partner_id, context=context)
            partner_ids[call.id] = partner_id
        return partner_ids

    def redirect_phonecall_view(self, cr, uid, phonecall_id, context=None):
        """Return an act_window dict opening the form view of one phonecall."""
        model_data = self.pool.get('ir.model.data')
        # Select the view
        tree_view = model_data.get_object_reference(cr, uid, 'crm', 'crm_case_phone_tree_view')
        form_view = model_data.get_object_reference(cr, uid, 'crm', 'crm_case_phone_form_view')
        search_view = model_data.get_object_reference(cr, uid, 'crm', 'view_crm_case_phonecalls_filter')
        value = {
            'name': _('Phone Call'),
            'view_type': 'form',
            'view_mode': 'tree,form',
            'res_model': 'crm.phonecall',
            'res_id' : int(phonecall_id),
            'views': [(form_view and form_view[1] or False, 'form'), (tree_view and tree_view[1] or False, 'tree'), (False, 'calendar')],
            'type': 'ir.actions.act_window',
            'search_view_id': search_view and search_view[1] or False,
        }
        return value

    def convert_opportunity(self, cr, uid, ids, opportunity_summary=False, partner_id=False, planned_revenue=0.0, probability=0.0, context=None):
        """Create one crm.lead opportunity per call, link it back to the call
        and mark the call as held.
        :return dict: {call_id: opportunity_id}
        """
        partner = self.pool.get('res.partner')
        opportunity = self.pool.get('crm.lead')
        opportunity_dict = {}
        default_contact = False
        for call in self.browse(cr, uid, ids, context=context):
            if not partner_id:
                partner_id = call.partner_id and call.partner_id.id or False
            if partner_id:
                address_id = partner.address_get(cr, uid, [partner_id])['default']
                if address_id:
                    default_contact = partner.browse(cr, uid, address_id, context=context)
            opportunity_id = opportunity.create(cr, uid, {
                'name': opportunity_summary or call.name,
                'planned_revenue': planned_revenue,
                'probability': probability,
                'partner_id': partner_id or False,
                'mobile': default_contact and default_contact.mobile,
                'section_id': call.section_id and call.section_id.id or False,
                'description': call.description or False,
                'priority': call.priority,
                'type': 'opportunity',
                'phone': call.partner_phone or False,
                'email_from': default_contact and default_contact.email,
            })
            vals = {
                'partner_id': partner_id,
                'opportunity_id': opportunity_id,
                'state': 'done',
            }
            self.write(cr, uid, [call.id], vals, context=context)
            opportunity_dict[call.id] = opportunity_id
        return opportunity_dict

    def action_make_meeting(self, cr, uid, ids, context=None):
        """
        Open meeting's calendar view to schedule a meeting on current phonecall.
        :return dict: dictionary value for created meeting view
        """
        partner_ids = []
        phonecall = self.browse(cr, uid, ids[0], context)
        if phonecall.partner_id and phonecall.partner_id.email:
            partner_ids.append(phonecall.partner_id.id)
        res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'calendar', 'action_calendar_event', context)
        res['context'] = {
            'default_phonecall_id': phonecall.id,
            'default_partner_ids': partner_ids,
            'default_user_id': uid,
            'default_email_from': phonecall.email_from,
            'default_name': phonecall.name,
        }
        return res

    def action_button_convert2opportunity(self, cr, uid, ids, context=None):
        """
        Convert a phonecall into an opp and then redirect to the opp view.
        :param list ids: list of calls ids to convert (typically contains a single id)
        :return dict: containing view information
        """
        if len(ids) != 1:
            raise osv.except_osv(_('Warning!'),_('It\'s only possible to convert one phonecall at a time.'))

        opportunity_dict = self.convert_opportunity(cr, uid, ids, context=context)
        return self.pool.get('crm.lead').redirect_opportunity_view(cr, uid, opportunity_dict[ids[0]], context)

    # ----------------------------------------
    # OpenChatter
    # ----------------------------------------

    def _call_set_partner_send_note(self, cr, uid, ids, context=None):
        """Post a chatter message recording the partner creation."""
        return self.message_post(cr, uid, ids, body=_("Partner has been <b>created</b>."), context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
servermeta/CraftOS | pam.py | 7 | 3929 | # (c) 2007 Chris AtLee <chris@atlee.ca>
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
"""
PAM module for python
Provides an authenticate function that will allow the caller to authenticate
a user against the Pluggable Authentication Modules (PAM) on the system.
Implemented using ctypes, so no compilation is necessary.
"""
__all__ = ['authenticate']
from ctypes import CDLL, POINTER, Structure, CFUNCTYPE, cast, pointer, sizeof
from ctypes import c_void_p, c_uint, c_char_p, c_char, c_int
from ctypes.util import find_library
# Handles to the PAM and C runtime shared libraries.
LIBPAM = CDLL(find_library("pam"))
LIBC = CDLL(find_library("c"))

# calloc(3): allocates the pam_response array handed back to PAM.
CALLOC = LIBC.calloc
CALLOC.restype = c_void_p
CALLOC.argtypes = [c_uint, c_uint]

# strdup(3): copies the password into C-owned memory that PAM will free.
STRDUP = LIBC.strdup
# BUG FIX: this was misspelled "argstypes", which silently created an unused
# attribute and left strdup without declared argument types.
STRDUP.argtypes = [c_char_p]
STRDUP.restype = POINTER(c_char)  # NOT c_char_p !!!!

# Various constants (pam_message styles from <security/pam_appl.h>)
PAM_PROMPT_ECHO_OFF = 1
PAM_PROMPT_ECHO_ON = 2
PAM_ERROR_MSG = 3
PAM_TEXT_INFO = 4
class PamHandle(Structure):
    """Wrapper for the opaque ``pam_handle_t*`` pointer."""
    _fields_ = [
        ("handle", c_void_p)
    ]

    def __init__(self):
        super(PamHandle, self).__init__()
        # Start out as a NULL handle; pam_start() fills it in.
        self.handle = 0
class PamMessage(Structure):
    """Wrapper for the C ``pam_message`` structure."""
    _fields_ = [
        ("msg_style", c_int),
        ("msg", POINTER(c_char)),
    ]

    def __repr__(self):
        style, text = self.msg_style, self.msg
        return "<PamMessage %i '%s'>" % (style, text)
class PamResponse(Structure):
    """Wrapper for the C ``pam_response`` structure."""
    _fields_ = [
        ("resp", POINTER(c_char)),
        ("resp_retcode", c_int),
    ]

    def __repr__(self):
        return "<PamResponse {0} '{1}'>".format(self.resp_retcode, self.resp)
# Callback type for the PAM conversation function:
# int conv(int num_msg, const struct pam_message **msg,
#          struct pam_response **resp, void *appdata_ptr)
CONV_FUNC = CFUNCTYPE(c_int,
                      c_int, POINTER(POINTER(PamMessage)),
                      POINTER(POINTER(PamResponse)), c_void_p)


class PamConv(Structure):
    """wrapper class for pam_conv structure"""
    _fields_ = [
        ("conv", CONV_FUNC),
        ("appdata_ptr", c_void_p)
    ]
# int pam_start(const char *service, const char *user,
#               const struct pam_conv *conv, pam_handle_t **pamh)
PAM_START = LIBPAM.pam_start
PAM_START.restype = c_int
PAM_START.argtypes = [c_char_p, c_char_p, POINTER(PamConv),
                      POINTER(PamHandle)]

# int pam_end(pam_handle_t *pamh, int status)
PAM_END = LIBPAM.pam_end
# BUG FIX: this was misspelled "restpe", which created a dead attribute and
# left the return type undeclared (it only worked because ctypes defaults
# to int).
PAM_END.restype = c_int
PAM_END.argtypes = [PamHandle, c_int]

# int pam_authenticate(pam_handle_t *pamh, int flags)
PAM_AUTHENTICATE = LIBPAM.pam_authenticate
PAM_AUTHENTICATE.restype = c_int
PAM_AUTHENTICATE.argtypes = [PamHandle, c_int]
def authenticate(username, password, service='login'):
    """Returns True if the given username and password authenticate for the
    given service. Returns False otherwise
    ``username``: the username to authenticate
    ``password``: the password in plain text
    ``service``: the PAM service to authenticate against.
                 Defaults to 'login'"""
    @CONV_FUNC
    def my_conv(n_messages, messages, p_response, app_data):
        """Simple conversation function that responds to any
        prompt where the echo is off with the supplied password"""
        # Create an array of n_messages response objects
        # NOTE: the response array must be allocated with libc calloc()
        # because PAM's caller frees it with free(); Python-managed memory
        # must never be handed back to PAM here.
        addr = CALLOC(n_messages, sizeof(PamResponse))
        p_response[0] = cast(addr, POINTER(PamResponse))
        for i in range(n_messages):
            if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF:
                # strdup() for the same reason: PAM free()s the password copy
                pw_copy = STRDUP(str(password))
                p_response.contents[i].resp = pw_copy
                p_response.contents[i].resp_retcode = 0
        return 0
    handle = PamHandle()
    conv = PamConv(my_conv, 0)
    retval = PAM_START(service, username, pointer(conv), pointer(handle))
    if retval != 0:
        # TODO: This is not an authentication error, something
        # has gone wrong starting up PAM
        PAM_END(handle, retval)
        return False
    retval = PAM_AUTHENTICATE(handle, 0)
    e = PAM_END(handle, retval)
    # Success only when both authentication and PAM shutdown report 0
    return retval == 0 and e == 0
if __name__ == "__main__":
    import getpass
    # Python 2 CLI entry point: authenticate the invoking user interactively.
    print authenticate(getpass.getuser(), getpass.getpass())
| gpl-3.0 |
peterbe/gg | gg/builtins/branches/tests/test_gg_branches.py | 1 | 2397 | import json
import os
import tempfile
import shutil
import pytest
import requests_mock
from click.testing import CliRunner
# By doing this import we make sure that the plugin is made available
# by the entry points loading inside gg.main.
# An alternative would be to set `PYTHONPATH=. py.test` (or something)
# but then that wouldn't test the entry point loading.
from gg.main import Config
from gg.builtins.branches.gg_branches import branches
@pytest.fixture(autouse=True)
def requestsmock():
    """Return a context where requests are all mocked.
    Usage::
        def test_something(requestsmock):
            requestsmock.get(
                'https://example.com/path',
                content=b'The content'
            )
            # Do stuff that involves requests.get('http://example.com/path')
    """
    # autouse=True: every test in this module runs with HTTP mocked out,
    # so no test can accidentally hit the network.
    with requests_mock.mock() as m:
        yield m
@pytest.fixture
def temp_configfile():
    """Yield the path of a temporary JSON state file initialized to ``{}``.

    The backing directory is removed after the test finishes, even when the
    test fails.  (``pytest.yield_fixture`` is deprecated and removed in
    modern pytest; plain ``pytest.fixture`` supports yield fixtures.)
    """
    tmp_dir = tempfile.mkdtemp("gg-start")
    fp = os.path.join(tmp_dir, "state.json")
    with open(fp, "w") as f:
        json.dump({}, f)
    try:
        yield fp
    finally:
        # guarantee cleanup even if the consuming test raises
        shutil.rmtree(tmp_dir)
def test_branches(temp_configfile, mocker):
    """`gg branches other` checks out the single branch matching "other"."""
    # Mock out GitPython entirely: the repo reports one current branch
    # ("this-branch") plus two others.
    mocked_git = mocker.patch("git.Repo")
    mocked_git().working_dir = "gg-start-test"
    mocked_git().git.branch.return_value = """
    * this-branch
      other-branch
    """
    branch1 = mocker.MagicMock()
    branch1.name = "this-branch"
    branch2 = mocker.MagicMock()
    branch2.name = "other-branch"
    branch3 = mocker.MagicMock()
    branch3.name = "not-merged-branch"
    mocked_git().heads.__iter__.return_value = [branch1, branch2, branch3]
    # Seed the state file with the fork name the command expects.
    state = json.load(open(temp_configfile))
    state["FORK_NAME"] = "peterbe"
    with open(temp_configfile, "w") as f:
        json.dump(state, f)
    runner = CliRunner()
    config = Config()
    config.configfile = temp_configfile
    result = runner.invoke(branches, ["other"], input="\n", obj=config)
    if result.exception:
        # re-raise so pytest shows the real traceback from inside the CLI
        # print(mocked_git.mock_calls)
        # print(result.output)
        # print(result.exception)
        raise result.exception
    # print(result.output)
    assert "other-branch" in result.output
    assert "this-branch" not in result.output
    assert result.exit_code == 0
    assert not result.exception
    # .assert_called_once() is new only in 3.6
    # branch2.checkout.assert_called_once()
    branch2.checkout.assert_called_with()
| mit |
wkeeling/ansible | test/units/plugins/callback/test_callback.py | 46 | 2749 | # (c) 2012-2014, Chris Meyers <chris.meyers.fsu@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six import PY3
from copy import deepcopy
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, mock_open
from ansible.plugins.callback import CallbackBase
import ansible.plugins.callback as callish
class TestCopyResultExclude(unittest.TestCase):
    """Tests for CallbackBase._copy_result_exclude()."""
    def setUp(self):
        # Minimal object carrying a list attribute and two dict attributes,
        # enough to exercise copy/exclude behaviour.
        class DummyClass():
            def __init__(self):
                self.bar = [ 1, 2, 3 ]
                self.a = {
                    "b": 2,
                    "c": 3,
                }
                self.b = {
                    "c": 3,
                    "d": 4,
                }
        self.foo = DummyClass()
        self.cb = CallbackBase()
    def tearDown(self):
        pass
    def test_copy_logic(self):
        # copied attributes compare equal to the originals
        res = self.cb._copy_result_exclude(self.foo, ())
        self.assertEqual(self.foo.bar, res.bar)
    def test_copy_deep(self):
        # the copy is deep: same value, different object identity
        res = self.cb._copy_result_exclude(self.foo, ())
        self.assertNotEqual(id(self.foo.bar), id(res.bar))
    def test_no_exclude(self):
        # with no exclusions, every attribute survives the copy
        res = self.cb._copy_result_exclude(self.foo, ())
        self.assertEqual(self.foo.bar, res.bar)
        self.assertEqual(self.foo.a, res.a)
        self.assertEqual(self.foo.b, res.b)
    def test_exclude(self):
        # excluded attributes come back as None; others are preserved
        res = self.cb._copy_result_exclude(self.foo, ['bar', 'b'])
        self.assertIsNone(res.bar)
        self.assertIsNone(res.b)
        self.assertEqual(self.foo.a, res.a)
    def test_result_unmodified(self):
        # the source object must be left untouched (same values, same ids);
        # excluding a non-existent attribute raises AttributeError
        bar_id = id(self.foo.bar)
        a_id = id(self.foo.a)
        res = self.cb._copy_result_exclude(self.foo, ['bar', 'a'])
        self.assertEqual(self.foo.bar, [ 1, 2, 3 ])
        self.assertEqual(bar_id, id(self.foo.bar))
        self.assertEqual(self.foo.a, dict(b=2, c=3))
        self.assertEqual(a_id, id(self.foo.a))
        self.assertRaises(AttributeError, self.cb._copy_result_exclude, self.foo, ['a', 'c', 'bar'])
| gpl-3.0 |
softtyphoon/tz | tools/zl/8/patent_query.py | 20 | 11647 |
import urllib
import urllib2
import cookielib
import time
import StringIO
import gzip
import sys
import re
import time
import os
import copy
import zlib
import random
import urlparse
# Default HTTP headers mimicking a desktop Firefox browser; Referer points
# at the SIPO advanced-search page so the server accepts the POST.
_header = {'User-Agent':'Mozilla/5.0 (Windows NT 5.1; rv:36.0) Gecko/20100101 Firefox/36.0',
           'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
           'Accept-Language':'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
           'Connection':'keep-alive',
           'Referer':'http://epub.sipo.gov.cn/gjcx.jsp',
           'Accept-Encoding':'gzip, deflate'
           }
# Template of the search form; the num* fields carry per-type result counts.
_post_data = {'showType':'1',
              'strWord':u'',
              'numSortMethod':'',
              'strLicenseCode':'',
              'selected':'',
              'numFMGB':'',   # invention publications
              'numFMSQ':'',   # invention grants
              'numSYXX':'',   # utility models
              'numWGSQ':'',   # industrial designs
              'pageSize':'10',
              'pageNow':'1'
              }
# Whitespace variants found in the scraped HTML, all normalised to ' '.
html_space = [' ', ' ', ' ', ' ']
_url = 'http://epub.sipo.gov.cn/patentoutline.action'
class patent_query():
    """Scraper for the SIPO patent search (epub.sipo.gov.cn).

    Results are appended to a GBK-encoded CSV file.  Python 2 only
    (urllib2/cookielib, print statements).
    """
    def __init__(self, header = _header, path=u'结果.csv'):
        self.header = _header
        self.post_data = ''
        self.url = _url
        self.cookie = None
        self.copcode = u'0'
        self.stcode = u'0'
        self.pa = u''
        # form values for the 'selected' field / num* count fields /
        # human-readable type names, index-aligned
        self.types = ['fmgb', 'fmsq', 'xxsq', 'wgsq']
        self.type = ['numFMGB', 'numFMSQ', 'numSYXX', 'numWGSQ']
        self.type_chn = [u'发明公布', u'发明授权', u'实用新型', u'外观设计']
        if os.path.exists(path):
            self.fn = open(path, 'a')
        else:
            # new file: write the CSV header row first
            header_str = u'公司代码,股票代码,申请人,查询输出名字,专利类型,申请公布号/授权公告,申请号,申请公布日/授权公告日,申请日'
            self.fn = open(path, 'w')
            self.fn.write(header_str.encode('gbk'))
            self.fn.write('\n')
    def __del__(self):
        self.fn.close()
    def save2file(self, result):
        '''
        Write the query results to the CSV file.
        result: [[], [], [], []] in order: invention publication,
                invention grant, utility model, industrial design.
        Each inner record: publication no., publication date,
                application no., application date, applicant.
        '''
        for (index, val) in enumerate(result):
            if len(val) == 0:
                continue
            for i in val:
                content = u','.join([self.copcode, self.stcode, self.pa, i[4], self.type_chn[index], i[0], i[2], i[1], i[3]])
                self.fn.write(content.encode('gbk'))
                self.fn.write('\n')
        self.fn.flush()
    def run(self, pa=u'', copcode=u'0', stcode=u'0'):
        '''
        Query all patents for applicant *pa*.
        Note: instead of scraping directly from the main query result, first
        fetch the per-type counts and then query each type separately; the
        direct approach produced many duplicates.
        '''
        if pa == u'':
            print u'请输入申请人'
            return
        print u'开始处理:', copcode, stcode, pa
        self.pa = pa
        # build the search expression: applicant LIKE '%pa%'
        qword = u'申请(专利权)人=\'%' + pa + '%\''
        self.copcode = copcode
        self.stcode = stcode
        all_list = []
        patent_num = range(0, 4)
        # Preprocessing: one query just to obtain the per-type result counts
        self.post_data = copy.copy(_post_data)
        self.post_data[self.type[0]] = '0'
        self.post_data[self.type[1]] = '0'
        self.post_data[self.type[2]] = '0'
        self.post_data[self.type[3]] = '0'
        self.post_data['pageNow'] = '1'
        self.post_data['strWord'] = qword.encode('utf-8')
        data = urllib.urlencode(self.post_data)
        page= ''
        while page == '':
            page, self.cookie = self.get_page(url_in=self.url, header_in=self.header, data=data, cookie_set=self.cookie)
            time.sleep(random.uniform(1.5, 2.5))
        # with open('www.txt', 'w+') as f:
        #     f.write(page)
        #     f.write(page.encode('gbk'))
        # decode using the charset declared in the page itself
        pat = re.compile(u'(?<=charset=).+?(?=")', re.DOTALL)
        res = pat.findall(page)[0]
        if 'utf-8' in res:
            page = page.decode('utf-8')
        elif 'gbk' in res or 'gb2312' in res:
            page = page.decode('gbk')
        else:
            page = page.decode('utf-8')
        # extract the per-type counts ("<type>:N件") from the overview page
        pat_list = [u'(?<=发明公布:)\d+?(?=件)', u'(?<=发明授权:)\d+?(?=件)', u'(?<=实用新型:)\d+?(?=件)', u'(?<=外观设计:)\d+?(?=件)']
        for (index, i) in enumerate(pat_list):
            pat = re.compile(i)
            res = pat.findall(page)
            if len(res) == 0:
                patent_num[index] = '0'
            else:
                patent_num[index] = res[0]
        # print patent_num
        # sys.exit(0)
        # Crawl the detailed patent records for each of the four types
        for (index, i) in enumerate(self.types):
            this_list = []
            pagenum = 1
            while True: # keep paging until there is no "next page" link
                if patent_num[index] == '0':
                    print self.type_chn[index], u'在本项目下没有专利'
                    break
                else:
                    # Build the POST data for this type/page
                    self.post_data = copy.copy(_post_data)
                    self.post_data[self.type[0]] = patent_num[0]
                    self.post_data[self.type[1]] = patent_num[1]
                    self.post_data[self.type[2]] = patent_num[2]
                    self.post_data[self.type[3]] = patent_num[3]
                    self.post_data['selected'] = i
                    self.post_data['pageNow'] = str(pagenum)
                    self.post_data['strWord'] = qword.encode('utf-8')
                    # print self.post_data
                    data = urllib.urlencode(self.post_data)
                    print u'类型:', self.type_chn[index], u'页数:', pagenum, u'数量:', patent_num[index]
                    page= ''
                    while page == '':
                        # print self.url, data
                        page, self.cookie = self.get_page(url_in=self.url, header_in=self.header, data=data, cookie_set=self.cookie)
                        time.sleep(random.uniform(1.5, 2.5))
                    pat = re.compile(u'(?<=charset=).+?(?=")', re.DOTALL)
                    res = pat.findall(page)[0]
                    if 'utf-8' in res:
                        page = page.decode('utf-8')
                    elif 'gbk' in res or 'gb2312' in res:
                        page = page.decode('gbk')
                    # with open('test.txt', 'w+') as f:
                    #     f.write(page.encode('gbk'))
                    this_list += self.info_extractor(page)
                    # a "next page" anchor means more results remain
                    pat = re.compile(u'(?<=<a).+?(?=>></a>)', re.DOTALL)
                    res = pat.findall(page)
                    if len(res) == 0:
                        break
                    # else:
                    #     print 'go to next page'
                    # time.sleep(random.uniform(1.5, 2.5))
                pagenum += 1
            del self.post_data
            all_list.append(this_list)
        return all_list
    def info_extractor(self, page):
        '''
        Extract patent records from a result page.  Each record:
        [publication/grant no., publication date, application no.,
         application date, applicant]
        Tags are stripped and HTML whitespace entities decoded.
        '''
        non = u'抱歉,没有您要查询的结果'
        if non in page:
            with open('test.txt', 'w+') as f:
                f.write(page.encode('gbk'))
            print u'>>>>>>该项目下没有专利'
            return []
        info = []
        pat_list = [u'(?<=<div class="cp_linr">).+?(?=<div class="cp_jsh">)']
        for pat in pat_list:
            pat = re.compile(pat, re.DOTALL)
            res = pat.findall(page)
            if len(res) > 0:
                break
        if len(res) == 0:
            # page layout changed; a new extraction pattern is needed
            print u'Add new pattern'
            sys.exit(0)
        for i in res:
            # fields: publication no., publication date, application no.,
            # application date, applicant
            pat = re.compile(u'(?<=:).+?(?=</li>)', re.DOTALL)
            cont = pat.findall(i)
            for k in range(0, 5):
                cont[k] = self.html_tag_remove(cont[k])
                if cont[k].find(';') > -1:
                    cont[k] = cont[k][0:cont[k].find(';')]
            info.append(cont[0:5])
            # print cont[0:5]
            # for i in cont:
            #     print i,
            # print ''  <a href="javascript:zl_fy(2);">></a>
        return info
    def html_tag_remove(self, str):
        '''
        Generic helper: strip HTML tags and whitespace entities from *str*.
        '''
        pat = re.compile(u'<.+?>', re.DOTALL)
        res = pat.findall(str)
        str_fix = str
        for i in res:
            str_fix = str_fix.replace(i, ' ')
        for i in html_space:
            str_fix = str_fix.replace(i, ' ')
        return str_fix
    def get_page(self, url_in=None, header_in=None, data=None, cookie_set=None):
        '''
        Generic helper: fetch a page (POST when *data* is given), handling
        cookies, gzip and chunked encodings.  Returns [body, cookiejar];
        body is '' on failure.
        '''
        url = url_in
        header = header_in
        header['Host'] = urlparse.urlparse(url).netloc
        opener = urllib2.OpenerDirector()
        http_handler = urllib2.HTTPHandler()
        https_handler = urllib2.HTTPSHandler()
        if cookie_set == None:
            cookie = cookielib.CookieJar()
        else:
            cookie = cookie_set
        cookie_handle = urllib2.HTTPCookieProcessor(cookie)
        opener.add_handler(http_handler)
        opener.add_handler(https_handler)
        opener.add_handler(cookie_handle)
        req = urllib2.Request(url)
        for (name, val) in header.items():
            req.add_header(name, val)
        if data is not None:
            req.add_data(data)
            req.add_header(u'Content-Length', len(data))
            req.add_header(u'Content-Type', 'application/x-www-form-urlencoded')
        try:
            time_format = '%Y-%m-%d %X'
            # print time.strftime(time_format, time.localtime())
            r = ''
            r = opener.open(req, timeout = 45)
            # Make sure everything is working ;
            print r.info().get('Content-Length')
            if r.info().get('Transfer-Encoding') == 'chunked':
                # print 'chunked'
                # 16+MAX_WBITS: accept a gzip header on the stream
                d = zlib.decompressobj(16+zlib.MAX_WBITS)
                content = ''
                while True:
                    data = r.read()
                    if not data:
                        break
                    content += d.decompress(data)
                data = content
            else:
                if r.info().get('Content-Encoding') == 'gzip':
                    buf = StringIO.StringIO(r.read())
                    f = gzip.GzipFile(fileobj=buf)
                    data = f.read()
                else:
                    data = r.read()
        except KeyboardInterrupt:
            print 'EXIT: Keyboard Interrupt'
            if r != '':
                r.close()
            opener.close()
            sys.exit(0)
        except:
            # any network/decode failure: return '' so callers retry
            data = ''
            time_format = '%Y-%m-%d %X'
            # print time.strftime(time_format, time.localtime())
            print 'Time Out'
        finally:
            if r != '':
                r.close()
            opener.close()
        return [data, cookie]
if __name__ == "__main__":
    # Example usage: query all patents of one company and append to the CSV.
    word = u'万科企业股份有限公司'
    word = u'中兴通讯股份有限公司'
    # with open('test.txt', 'r') as f:
    #     page = f.read()
    a = patent_query()
    result = a.run(word)
    a.save2file(result)
    # a.info_extractor(page.decode('gbk'))
| gpl-2.0 |
CIRCL/AIL-framework | bin/Mail.py | 1 | 7314 | #!/usr/bin/env python3
# -*-coding:UTF-8 -*
"""
The Mails Module
======================
This module consumes the Redis-list created by the Categ module.
It applies mail regexes on item content and warns if the number of valid e-mails is above a threshold.
"""
import os
import re
import sys
import uuid
import redis
import time
import datetime
import dns.resolver
import dns.exception
from multiprocessing import Process as Proc
from pubsublogger import publisher
from Helper import Process
from pyfaup.faup import Faup
sys.path.append(os.path.join(os.environ['AIL_BIN'], 'packages'))
import Item
sys.path.append(os.path.join(os.environ['AIL_BIN'], 'lib/'))
import ConfigLoader
## LOAD CONFIG ##
config_loader = ConfigLoader.ConfigLoader()
server_statistics = config_loader.get_redis_conn("ARDB_Statistics")
r_serv_cache = config_loader.get_redis_conn("Redis_Cache")
dns_server = config_loader.get_config_str('Mail', 'dns')
config_loader = None
## -- ##
def is_mxdomain_in_cache(mxdomain):
    """Return a truthy value when *mxdomain* was recently validated."""
    cache_key = 'mxdomain:{}'.format(mxdomain)
    return r_serv_cache.exists(cache_key)
def save_mxdomain_in_cache(mxdomain):
    """Remember a resolvable MX domain for one day."""
    cache_key = 'mxdomain:{}'.format(mxdomain)
    r_serv_cache.setex(cache_key, 1, datetime.timedelta(days=1))
def check_mx_record(set_mxdomains, dns_server):
    """Check if emails MX domains are responding.
    :param set_mxdomains: -- (set) e-mail domains to resolve
    :param dns_server: -- (str) address of the DNS server to query
    :return: (list) the domains that resolved (or were cached as valid)
    """
    resolver = dns.resolver.Resolver()
    resolver.nameservers = [dns_server]
    resolver.timeout = 5.0
    # NOTE(review): lifetime < timeout caps the whole query at 2s, so the
    # 5s per-try timeout can never be reached -- confirm this is intended
    resolver.lifetime = 2.0
    valid_mxdomain = []
    for mxdomain in set_mxdomains:
        # check if is in cache
        # # TODO:
        if is_mxdomain_in_cache(mxdomain):
            valid_mxdomain.append(mxdomain)
        else:
            # DNS resolution
            try:
                answers = resolver.query(mxdomain, rdtype=dns.rdatatype.MX)
                if answers:
                    # cache positive results for a day (see save_mxdomain_in_cache)
                    save_mxdomain_in_cache(mxdomain)
                    valid_mxdomain.append(mxdomain)
                    # DEBUG
                    # print('---')
                    # print(answers.response)
                    # print(answers.qname)
                    # print(answers.rdtype)
                    # print(answers.rdclass)
                    # print(answers.nameserver)
                    # print()
            except dns.resolver.NoNameservers:
                publisher.debug('NoNameserver, No non-broken nameservers are available to answer the query.')
                print('NoNameserver, No non-broken nameservers are available to answer the query.')
            except dns.resolver.NoAnswer:
                publisher.debug('NoAnswer, The response did not contain an answer to the question.')
                print('NoAnswer, The response did not contain an answer to the question.')
            except dns.name.EmptyLabel:
                publisher.debug('SyntaxError: EmptyLabel')
                print('SyntaxError: EmptyLabel')
            except dns.resolver.NXDOMAIN:
                #save_mxdomain_in_cache(mxdomain)
                publisher.debug('The query name does not exist.')
                print('The query name does not exist.')
            except dns.name.LabelTooLong:
                publisher.debug('The Label is too long')
                print('The Label is too long')
            except dns.exception.Timeout:
                print('dns timeout')
                #save_mxdomain_in_cache(mxdomain)
            except Exception as e:
                print(e)
    return valid_mxdomain
def extract_all_emails(redis_key, item_content):
    """Store every e-mail found in *item_content* under *redis_key*.

    The key expires after 360 seconds; nothing is written when no e-mail
    matches.
    """
    found = re.findall(email_regex, item_content)
    if found:
        r_serv_cache.sadd(redis_key, *found)
        r_serv_cache.expire(redis_key, 360)
if __name__ == "__main__":
    publisher.port = 6380
    publisher.channel = "Script"
    config_section = 'Mail'
    faup = Faup()
    p = Process(config_section)
    publisher.info("Mails module started")
    # Numbers of Mails needed to Tags
    mail_threshold = 10
    # extraction runs in a subprocess and is killed after this many seconds
    max_execution_time = 30
    email_regex = "[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,6}"
    # per-process scratch key in the cache for extracted addresses
    redis_key = 'mail_extracted:{}'.format(str(uuid.uuid4()))
    while True:
        message = p.get_from_set()
        if message is not None:
            item_id, score = message.split()
            print(item_id)
            item_content = Item.get_item_content(item_id)
            # run the regex extraction in a child process so it can be
            # terminated if it takes too long on a pathological item
            proc = Proc(target=extract_all_emails, args=(redis_key, item_content, ))
            try:
                proc.start()
                proc.join(max_execution_time)
                if proc.is_alive():
                    proc.terminate()
                    p.incr_module_timeout_statistic()
                    err_mess = "Mails: processing timeout: {}".format(item_id)
                    print(err_mess)
                    publisher.info(err_mess)
                    continue
                else:
                    all_emails = r_serv_cache.smembers(redis_key)
                    r_serv_cache.delete(redis_key)
                    proc.terminate()
            except KeyboardInterrupt:
                print("Caught KeyboardInterrupt, terminating workers")
                proc.terminate()
                sys.exit(0)
            # get MXdomains: group the addresses by their domain part
            set_mxdomains = set()
            dict_mxdomains_email = {}
            for email in all_emails:
                mxdomain = email.split('@')[1].lower()
                if not mxdomain in dict_mxdomains_email:
                    dict_mxdomains_email[mxdomain] = []
                    set_mxdomains.add(mxdomain)
                dict_mxdomains_email[mxdomain].append(email)
            ## TODO: add MAIL trackers
            valid_mx = check_mx_record(set_mxdomains, dns_server)
            item_date = Item.get_item_date(item_id)
            num_valid_email = 0
            for domain_mx in valid_mx:
                num_valid_email += len(dict_mxdomains_email[domain_mx])
                for email in dict_mxdomains_email[domain_mx]:
                    msg = 'mail;{};{};{}'.format(1, email, item_date)
                    p.populate_set_out(msg, 'ModuleStats')
                    # Create country stats
                    faup.decode(email)
                    tld = faup.get()['tld']
                    try:
                        tld = tld.decode()
                    except:
                        pass
                    server_statistics.hincrby('mail_by_tld:{}'.format(item_date), tld, 1)
            msg = 'Mails;{};{};{};Checked {} e-mail(s);{}'.format(Item.get_source(item_id), item_date, Item.get_item_basename(item_id), num_valid_email, item_id)
            if num_valid_email > mail_threshold:
                # enough validated addresses: flag the item and tag it
                print('{} Checked {} e-mail(s)'.format(item_id, num_valid_email))
                publisher.warning(msg)
                #Send to duplicate
                p.populate_set_out(item_id, 'Duplicate')
                #tags
                msg = 'infoleak:automatic-detection="mail";{}'.format(item_id)
                p.populate_set_out(msg, 'Tags')
            else:
                publisher.info(msg)
        else:
            # queue empty: idle before polling again
            time.sleep(10)
| agpl-3.0 |
sassoftware/anaconda | pyanaconda/timezone.py | 3 | 6359 | #
# Copyright (C) 2012 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Vratislav Podzimek <vpodzime@redhat.com>
#
"""
Module providing functions for getting the list of timezones, writing timezone
configuration, valid timezones recognition etc.
"""
import os
import pytz
import langtable
from collections import OrderedDict
from pyanaconda import iutil
from pyanaconda.constants import THREAD_STORAGE
from pyanaconda.flags import flags
from pyanaconda.threads import threadMgr
from blivet import arch
import logging
log = logging.getLogger("anaconda")
# The following zones are not in pytz.common_timezones and
# Etc category in pytz.all_timezones includes some more,
# however confusing ones (like UCT, GMT+0, GMT-0,...)
ETC_ZONES = ['GMT+1', 'GMT+2', 'GMT+3', 'GMT+4', 'GMT+5', 'GMT+6', 'GMT+7',
'GMT+8', 'GMT+9', 'GMT+10', 'GMT+11', 'GMT+12',
'GMT-1', 'GMT-2', 'GMT-3', 'GMT-4', 'GMT-5', 'GMT-6', 'GMT-7',
'GMT-8', 'GMT-9', 'GMT-10', 'GMT-11', 'GMT-12', 'GMT-13',
'GMT-14', 'UTC', 'GMT']
NTP_PACKAGE = "chrony"
NTP_SERVICE = "chronyd"
class TimezoneConfigError(Exception):
    """Exception class for timezone configuration related problems."""
def time_initialize(timezone, storage, bootloader):
    """
    Try to guess if RTC uses UTC time or not, set timezone.isUtc properly and
    set system time from RTC using the UTC guess.
    Guess is done by searching for bootable ntfs devices.
    :param timezone: ksdata.timezone object
    :param storage: blivet.Blivet instance
    :param bootloader: bootloader.Bootloader instance
    """
    if arch.isS390():
        # nothing to do on s390 where hwclock doesn't exist
        return
    if not timezone.isUtc and not flags.automatedInstall:
        # if set in the kickstart, no magic needed here
        threadMgr.wait(THREAD_STORAGE)
        ntfs_devs = filter(lambda dev: dev.format.name == "ntfs",
                           storage.devices)
        # heuristic: a bootable Windows (ntfs) install implies the RTC
        # runs in localtime; otherwise assume UTC
        timezone.isUtc = not bootloader.has_windows(ntfs_devs)
    # sync system time from the RTC using the guessed/confirmed mode
    cmd = "hwclock"
    args = ["--hctosys"]
    if timezone.isUtc:
        args.append("--utc")
    else:
        args.append("--localtime")
    iutil.execWithRedirect(cmd, args)
def write_timezone_config(timezone, root):
    """
    Write timezone configuration for the system specified by root.

    Creates a relative /etc/localtime symlink pointing into
    /usr/share/zoneinfo and rewrites /etc/adjtime with the UTC/LOCAL flag
    matching timezone.isUtc (preserving the first two lines of an existing
    adjtime file).

    :param timezone: ksdata.timezone object
    :param root: path to the root
    :raise: TimezoneConfigError
    """
    # we want to create a relative symlink
    tz_file = "/usr/share/zoneinfo/" + timezone.timezone
    rooted_tz_file = os.path.normpath(root + tz_file)
    relative_path = os.path.normpath("../" + tz_file)
    link_path = os.path.normpath(root + "/etc/localtime")

    if not os.access(rooted_tz_file, os.R_OK):
        log.error("Timezone to be linked (%s) doesn't exist", rooted_tz_file)
    else:
        try:
            # os.symlink fails if link_path exists, so try to remove it first
            os.remove(link_path)
        except OSError:
            pass

        try:
            os.symlink(relative_path, link_path)
        except OSError as oserr:
            log.error("Error when symlinking timezone (from %s): %s",
                      rooted_tz_file, oserr.strerror)

    adjtime_path = os.path.normpath(root + "/etc/adjtime")

    # keep the drift/status lines of an existing adjtime file; fall back to
    # sane defaults when it is missing or unreadable
    # (BUGFIX: the file was previously opened without a context manager and
    # leaked if readlines() raised)
    try:
        with open(adjtime_path, "r") as fobj:
            lines = fobj.readlines()
    except IOError:
        lines = ["0.0 0 0.0\n", "0\n"]

    try:
        with open(adjtime_path, "w") as fobj:
            fobj.write(lines[0])
            fobj.write(lines[1])
            fobj.write("UTC\n" if timezone.isUtc else "LOCAL\n")
    except IOError as ioerr:
        msg = "Error while writing /etc/adjtime file: %s" % ioerr.strerror
        raise TimezoneConfigError(msg)
def save_hw_clock(timezone):
    """
    Save system time to HW clock.

    :param timezone: ksdata.timezone object
    """
    if arch.isS390():
        # s390 has no hwclock binary, nothing to do
        return

    mode = "--utc" if timezone.isUtc else "--local"
    iutil.execWithRedirect("hwclock", ["--systohc", mode])
def get_preferred_timezone(territory):
    """
    Get the preferred timezone for a given territory. Note that this function
    simply returns the first timezone in the list of timezones for a given
    territory.

    :param territory: territory to get preferred timezone for
    :type territory: str
    :return: preferred timezone for the given territory or None if no found
    :rtype: str or None
    """
    candidates = langtable.list_timezones(territoryId=territory)
    return candidates[0] if candidates else None
def get_all_regions_and_timezones():
    """
    Get a dictionary mapping the regions to the set of their timezones.

    :rtype: dict
    """
    result = OrderedDict()
    for tz in pytz.common_timezones:
        # split "Region/City" at the first slash; skip zones without one
        region, _, city = tz.partition("/")
        if city:
            result.setdefault(region, set()).add(city)
    result["Etc"] = set(ETC_ZONES)
    return result
def is_valid_timezone(timezone):
    """
    Check if a given string is an existing timezone.

    :type timezone: str
    :rtype: bool
    """
    if timezone in pytz.common_timezones:
        return True
    # also accept the extra "Etc/..." zones not listed by pytz
    return any(timezone == "Etc/" + zone for zone in ETC_ZONES)
| gpl-2.0 |
Tendrl/commons | tendrl/commons/objects/node/atoms/is_node_tendrl_managed/__init__.py | 1 | 5397 | import etcd
from tendrl.commons import objects
from tendrl.commons.objects import AtomExecutionFailedError
from tendrl.commons.utils import etcd_utils
from tendrl.commons.utils import log_utils as logger
class IsNodeTendrlManaged(objects.BaseAtom):
    """Atom that verifies every node in ``Node[]`` has its inventory
    (OS, CPU, Memory, Networks) populated under ``nodes/<id>/`` in etcd.

    ``run()`` returns True only when all details exist for all nodes;
    it returns False (after logging an error) on the first missing one.
    """

    # (etcd key suffix, human-readable label used in the error message)
    # checked for every node, in this order.
    _REQUIRED_DETAILS = (
        ("Os", "OS details"),
        ("Cpu", "CPU details"),
        ("Memory", "Memory details"),
        ("Networks", "network details"),
    )

    def __init__(self, *args, **kwargs):
        super(IsNodeTendrlManaged, self).__init__(*args, **kwargs)

    def run(self):
        """Validate all nodes given in the ``Node[]`` parameter.

        :raises AtomExecutionFailedError: when ``Node[]`` is missing/empty
        :return: True when every node has all required details, else False
        """
        node_ids = self.parameters.get('Node[]')
        if not node_ids:
            raise AtomExecutionFailedError("Node[] cannot be empty")
        for node_id in node_ids:
            for key, label in self._REQUIRED_DETAILS:
                if not self._has_detail(node_id, key, label):
                    return False
        return True

    def _has_detail(self, node_id, key, label):
        """Return True if ``nodes/<node_id>/<key>`` exists and has children.

        Logs an error (identical message to the historical per-key checks)
        and returns False when the key is absent or empty.
        """
        try:
            details = etcd_utils.read("nodes/%s/%s" % (node_id, key))
            if details.leaves is None:
                self._log_missing(label)
                return False
        except etcd.EtcdKeyNotFound:
            self._log_missing(label)
            return False
        return True

    def _log_missing(self, label):
        """Log that the current node lacks the *label* inventory section."""
        logger.log(
            "error",
            NS.get("publisher_id", None),
            {
                "message": "Node %s doesn't have %s "
                           "populated" % (NS.node_context.fqdn, label)
            },
            job_id=self.parameters['job_id'],
            flow_id=self.parameters['flow_id']
        )
| lgpl-2.1 |
mortonjt/scipy | scipy/optimize/_differentialevolution.py | 12 | 28955 | """
differential_evolution: The differential evolution global optimization algorithm
Added by Andrew Nelson 2014
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.optimize import OptimizeResult, minimize
from scipy.optimize.optimize import _status_message
import numbers
__all__ = ['differential_evolution']
_MACHEPS = np.finfo(np.float64).eps
def differential_evolution(func, bounds, args=(), strategy='best1bin',
                           maxiter=None, popsize=15, tol=0.01,
                           mutation=(0.5, 1), recombination=0.7, seed=None,
                           callback=None, disp=False, polish=True,
                           init='latinhypercube'):
    """Finds the global minimum of a multivariate function.
    Differential Evolution is stochastic in nature (does not use gradient
    methods) to find the minimium, and can search large areas of candidate
    space, but often requires larger numbers of function evaluations than
    conventional gradient based techniques.
    The algorithm is due to Storn and Price [1]_.
    Parameters
    ----------
    func : callable
        The objective function to be minimized.  Must be in the form
        ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
        and ``args`` is a  tuple of any additional fixed parameters needed to
        completely specify the function.
    bounds : sequence
        Bounds for variables.  ``(min, max)`` pairs for each element in ``x``,
        defining the lower and upper bounds for the optimizing argument of
        `func`. It is required to have ``len(bounds) == len(x)``.
        ``len(bounds)`` is used to determine the number of parameters in ``x``.
    args : tuple, optional
        Any additional fixed parameters needed to
        completely specify the objective function.
    strategy : str, optional
        The differential evolution strategy to use. Should be one of:
            - 'best1bin'
            - 'best1exp'
            - 'rand1exp'
            - 'randtobest1exp'
            - 'best2exp'
            - 'rand2exp'
            - 'randtobest1bin'
            - 'best2bin'
            - 'rand2bin'
            - 'rand1bin'
        The default is 'best1bin'.
    maxiter : int, optional
        The maximum number of times the entire population is evolved.
        The maximum number of function evaluations is:
        ``maxiter * popsize * len(x)``
    popsize : int, optional
        A multiplier for setting the total population size.  The population has
        ``popsize * len(x)`` individuals.
    tol : float, optional
        When the mean of the population energies, multiplied by tol,
        divided by the standard deviation of the population energies
        is greater than 1 the solving process terminates:
        ``convergence = mean(pop) * tol / stdev(pop) > 1``
    mutation : float or tuple(float, float), optional
        The mutation constant.
        If specified as a float it should be in the range [0, 2].
        If specified as a tuple ``(min, max)`` dithering is employed. Dithering
        randomly changes the mutation constant on a generation by generation
        basis. The mutation constant for that generation is taken from
        ``U[min, max)``. Dithering can help speed convergence significantly.
        Increasing the mutation constant increases the search radius, but will
        slow down convergence.
    recombination : float, optional
        The recombination constant, should be in the range [0, 1]. Increasing
        this value allows a larger number of mutants to progress into the next
        generation, but at the risk of population stability.
    seed : int or `np.random.RandomState`, optional
        If `seed` is not specified the `np.RandomState` singleton is used.
        If `seed` is an int, a new `np.random.RandomState` instance is used,
        seeded with seed.
        If `seed` is already a `np.random.RandomState instance`, then that
        `np.random.RandomState` instance is used.
        Specify `seed` for repeatable minimizations.
    disp : bool, optional
        Display status messages
    callback : callable, `callback(xk, convergence=val)`, optional
        A function to follow the progress of the minimization. ``xk`` is
        the current value of ``x0``. ``val`` represents the fractional
        value of the population convergence.  When ``val`` is greater than one
        the function halts. If callback returns `True`, then the minimization
        is halted (any polishing is still carried out).
    polish : bool, optional
        If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B`
        method is used to polish the best population member at the end, which
        can improve the minimization slightly.
    init : string, optional
        Specify how the population initialization is performed. Should be
        one of:
            - 'latinhypercube'
            - 'random'
        The default is 'latinhypercube'. Latin Hypercube sampling tries to
        maximize coverage of the available parameter space. 'random' initializes
        the population randomly - this has the drawback that clustering can
        occur, preventing the whole of parameter space being covered.
    Returns
    -------
    res : OptimizeResult
        The optimization result represented as a `OptimizeResult` object.
        Important attributes are: ``x`` the solution array, ``success`` a
        Boolean flag indicating if the optimizer exited successfully and
        ``message`` which describes the cause of the termination. See
        `OptimizeResult` for a description of other attributes. If `polish`
        was employed, then OptimizeResult also contains the `jac` attribute.
    Notes
    -----
    Differential evolution is a stochastic population based method that is
    useful for global optimization problems. At each pass through the population
    the algorithm mutates each candidate solution by mixing with other candidate
    solutions to create a trial candidate. There are several strategies [2]_ for
    creating trial candidates, which suit some problems more than others. The
    'best1bin' strategy is a good starting point for many systems. In this
    strategy two members of the population are randomly chosen. Their difference
    is used to mutate the best member (the `best` in `best1bin`), :math:`b_0`,
    so far:
    .. math::
        b' = b_0 + mutation * (population[rand0] - population[rand1])
    A trial vector is then constructed. Starting with a randomly chosen 'i'th
    parameter the trial is sequentially filled (in modulo) with parameters from
    `b'` or the original candidate. The choice of whether to use `b'` or the
    original candidate is made with a binomial distribution (the 'bin' in
    'best1bin') - a random number in [0, 1) is generated.  If this number is
    less than the `recombination` constant then the parameter is loaded from
    `b'`, otherwise it is loaded from the original candidate.  The final
    parameter is always loaded from `b'`.  Once the trial candidate is built
    its fitness is assessed. If the trial is better than the original candidate
    then it takes its place. If it is also better than the best overall
    candidate it also replaces that.
    To improve your chances of finding a global minimum use higher `popsize`
    values, with higher `mutation` and (dithering), but lower `recombination`
    values. This has the effect of widening the search radius, but slowing
    convergence.
    .. versionadded:: 0.15.0
    Examples
    --------
    Let us consider the problem of minimizing the Rosenbrock function. This
    function is implemented in `rosen` in `scipy.optimize`.
    >>> from scipy.optimize import rosen, differential_evolution
    >>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)]
    >>> result = differential_evolution(rosen, bounds)
    >>> result.x, result.fun
    (array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19)
    Next find the minimum of the Ackley function
    (http://en.wikipedia.org/wiki/Test_functions_for_optimization).
    >>> from scipy.optimize import differential_evolution
    >>> import numpy as np
    >>> def ackley(x):
    ...     arg1 = -0.2 * np.sqrt(0.5 * (x[0] ** 2 + x[1] ** 2))
    ...     arg2 = 0.5 * (np.cos(2. * np.pi * x[0]) + np.cos(2. * np.pi * x[1]))
    ...     return -20. * np.exp(arg1) - np.exp(arg2) + 20. + np.e
    >>> bounds = [(-5, 5), (-5, 5)]
    >>> result = differential_evolution(ackley, bounds)
    >>> result.x, result.fun
    (array([ 0.,  0.]), 4.4408920985006262e-16)
    References
    ----------
    .. [1] Storn, R and Price, K, Differential Evolution - a Simple and
           Efficient Heuristic for Global Optimization over Continuous Spaces,
           Journal of Global Optimization, 1997, 11, 341 - 359.
    .. [2] http://www1.icsi.berkeley.edu/~storn/code.html
    .. [3] http://en.wikipedia.org/wiki/Differential_evolution
    """
    # Thin public wrapper: all of the work happens in the solver object.
    solver = DifferentialEvolutionSolver(func, bounds, args=args,
                                         strategy=strategy, maxiter=maxiter,
                                         popsize=popsize, tol=tol,
                                         mutation=mutation,
                                         recombination=recombination,
                                         seed=seed, polish=polish,
                                         callback=callback,
                                         disp=disp,
                                         init=init)
    return solver.solve()
class DifferentialEvolutionSolver(object):

    """This class implements the differential evolution solver

    Parameters
    ----------
    func : callable
        The objective function to be minimized. Must be in the form
        ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
        and ``args`` is a tuple of any additional fixed parameters needed to
        completely specify the function.
    bounds : sequence
        Bounds for variables. ``(min, max)`` pairs for each element in ``x``,
        defining the lower and upper bounds for the optimizing argument of
        `func`. It is required to have ``len(bounds) == len(x)``.
        ``len(bounds)`` is used to determine the number of parameters in ``x``.
    args : tuple, optional
        Any additional fixed parameters needed to
        completely specify the objective function.
    strategy : str, optional
        The differential evolution strategy to use. Should be one of:

            - 'best1bin'
            - 'best1exp'
            - 'rand1exp'
            - 'randtobest1exp'
            - 'best2exp'
            - 'rand2exp'
            - 'randtobest1bin'
            - 'best2bin'
            - 'rand2bin'
            - 'rand1bin'

        The default is 'best1bin'
    maxiter : int, optional
        The maximum number of times the entire population is evolved. The
        maximum number of function evaluations is:
        ``maxiter * popsize * len(x)``
    popsize : int, optional
        A multiplier for setting the total population size. The population has
        ``popsize * len(x)`` individuals.
    tol : float, optional
        When the mean of the population energies, multiplied by tol,
        divided by the standard deviation of the population energies
        is greater than 1 the solving process terminates:
        ``convergence = mean(pop) * tol / stdev(pop) > 1``
    mutation : float or tuple(float, float), optional
        The mutation constant.
        If specified as a float it should be in the range [0, 2].
        If specified as a tuple ``(min, max)`` dithering is employed. Dithering
        randomly changes the mutation constant on a generation by generation
        basis. The mutation constant for that generation is taken from
        U[min, max). Dithering can help speed convergence significantly.
        Increasing the mutation constant increases the search radius, but will
        slow down convergence.
    recombination : float, optional
        The recombination constant, should be in the range [0, 1]. Increasing
        this value allows a larger number of mutants to progress into the next
        generation, but at the risk of population stability.
    seed : int or `np.random.RandomState`, optional
        If `seed` is not specified the `np.random.RandomState` singleton is
        used.
        If `seed` is an int, a new `np.random.RandomState` instance is used,
        seeded with `seed`.
        If `seed` is already a `np.random.RandomState` instance, then that
        `np.random.RandomState` instance is used.
        Specify `seed` for repeatable minimizations.
    disp : bool, optional
        Display status messages
    callback : callable, `callback(xk, convergence=val)`, optional
        A function to follow the progress of the minimization. ``xk`` is
        the current value of ``x0``. ``val`` represents the fractional
        value of the population convergence. When ``val`` is greater than one
        the function halts. If callback returns `True`, then the minimization
        is halted (any polishing is still carried out).
    polish : bool, optional
        If True, then `scipy.optimize.minimize` with the `L-BFGS-B` method
        is used to polish the best population member at the end. This requires
        a few more function evaluations.
    maxfun : int, optional
        Set the maximum number of function evaluations. However, it probably
        makes more sense to set `maxiter` instead.
    init : string, optional
        Specify which type of population initialization is performed. Should be
        one of:

            - 'latinhypercube'
            - 'random'
    """

    # Dispatch of mutation strategy method (binomial or exponential).
    # Values are method-name strings resolved with ``getattr`` in
    # ``__init__``; the strategy-name suffix ('bin'/'exp') selects
    # binomial vs. exponential crossover in ``_mutate``.
    _binomial = {'best1bin': '_best1',
                 'randtobest1bin': '_randtobest1',
                 'best2bin': '_best2',
                 'rand2bin': '_rand2',
                 'rand1bin': '_rand1'}
    _exponential = {'best1exp': '_best1',
                    'rand1exp': '_rand1',
                    'randtobest1exp': '_randtobest1',
                    'best2exp': '_best2',
                    'rand2exp': '_rand2'}
def __init__(self, func, bounds, args=(),
strategy='best1bin', maxiter=None, popsize=15,
tol=0.01, mutation=(0.5, 1), recombination=0.7, seed=None,
maxfun=None, callback=None, disp=False, polish=True,
init='latinhypercube'):
if strategy in self._binomial:
self.mutation_func = getattr(self, self._binomial[strategy])
elif strategy in self._exponential:
self.mutation_func = getattr(self, self._exponential[strategy])
else:
raise ValueError("Please select a valid mutation strategy")
self.strategy = strategy
self.callback = callback
self.polish = polish
self.tol = tol
#Mutation constant should be in [0, 2). If specified as a sequence
#then dithering is performed.
self.scale = mutation
if (not np.all(np.isfinite(mutation)) or
np.any(np.array(mutation) >= 2) or
np.any(np.array(mutation) < 0)):
raise ValueError('The mutation constant must be a float in '
'U[0, 2), or specified as a tuple(min, max)'
' where min < max and min, max are in U[0, 2).')
self.dither = None
if hasattr(mutation, '__iter__') and len(mutation) > 1:
self.dither = [mutation[0], mutation[1]]
self.dither.sort()
self.cross_over_probability = recombination
self.func = func
self.args = args
# convert tuple of lower and upper bounds to limits
# [(low_0, high_0), ..., (low_n, high_n]
# -> [[low_0, ..., low_n], [high_0, ..., high_n]]
self.limits = np.array(bounds, dtype='float').T
if (np.size(self.limits, 0) != 2
or not np.all(np.isfinite(self.limits))):
raise ValueError('bounds should be a sequence containing '
'real valued (min, max) pairs for each value'
' in x')
self.maxiter = maxiter or 1000
self.maxfun = (maxfun or ((self.maxiter + 1) * popsize *
np.size(self.limits, 1)))
# population is scaled to between [0, 1].
# We have to scale between parameter <-> population
# save these arguments for _scale_parameter and
# _unscale_parameter. This is an optimization
self.__scale_arg1 = 0.5 * (self.limits[0] + self.limits[1])
self.__scale_arg2 = np.fabs(self.limits[0] - self.limits[1])
parameter_count = np.size(self.limits, 1)
self.random_number_generator = _make_random_gen(seed)
#default initialization is a latin hypercube design, but there
#are other population initializations possible.
self.population = np.zeros((popsize * parameter_count,
parameter_count))
if init == 'latinhypercube':
self.init_population_lhs()
elif init == 'random':
self.init_population_random()
else:
raise ValueError("The population initialization method must be one"
"of 'latinhypercube' or 'random'")
self.population_energies = np.ones(
popsize * parameter_count) * np.inf
self.disp = disp
def init_population_lhs(self):
"""
Initializes the population with Latin Hypercube Sampling
Latin Hypercube Sampling ensures that the sampling of parameter space
is maximised.
"""
samples = np.size(self.population, 0)
N = np.size(self.population, 1)
rng = self.random_number_generator
# Generate the intervals
segsize = 1.0 / samples
# Fill points uniformly in each interval
rdrange = rng.rand(samples, N) * segsize
rdrange += np.atleast_2d(
np.linspace(0., 1., samples, endpoint=False)).T
# Make the random pairings
self.population = np.zeros_like(rdrange)
for j in range(N):
order = rng.permutation(range(samples))
self.population[:, j] = rdrange[order, j]
def init_population_random(self):
"""
Initialises the population at random. This type of initialization
can possess clustering, Latin Hypercube sampling is generally better.
"""
rng = self.random_number_generator
self.population = rng.random_sample(self.population.shape)
@property
def x(self):
"""
The best solution from the solver
Returns
-------
x - ndarray
The best solution from the solver.
"""
return self._scale_parameters(self.population[0])
    def solve(self):
        """
        Runs the DifferentialEvolutionSolver.

        Returns
        -------
        res : OptimizeResult
            The optimization result represented as a ``OptimizeResult`` object.
            Important attributes are: ``x`` the solution array, ``success`` a
            Boolean flag indicating if the optimizer exited successfully and
            ``message`` which describes the cause of the termination. See
            `OptimizeResult` for a description of other attributes. If polish
            was employed, then OptimizeResult also contains the ``hess_inv`` and
            ``jac`` attributes.
        """
        nfev, nit, warning_flag = 0, 0, False
        status_message = _status_message['success']

        # calculate energies to start with
        for index, candidate in enumerate(self.population):
            parameters = self._scale_parameters(candidate)
            self.population_energies[index] = self.func(parameters,
                                                        *self.args)
            nfev += 1

            # Evaluation budget can be exhausted during initialization.
            if nfev > self.maxfun:
                warning_flag = True
                status_message = _status_message['maxfev']
                break

        minval = np.argmin(self.population_energies)

        # put the lowest energy into the best solution position.
        # Slot 0 of the population always holds the best member.
        lowest_energy = self.population_energies[minval]
        self.population_energies[minval] = self.population_energies[0]
        self.population_energies[0] = lowest_energy

        self.population[[0, minval], :] = self.population[[minval, 0], :]

        if warning_flag:
            # Budget exhausted before the evolution loop could start.
            return OptimizeResult(
                x=self.x,
                fun=self.population_energies[0],
                nfev=nfev,
                nit=nit,
                message=status_message,
                success=(warning_flag is not True))

        # do the optimisation.
        for nit in range(1, self.maxiter + 1):
            # Dithering: draw a fresh mutation constant for this generation.
            if self.dither is not None:
                self.scale = self.random_number_generator.rand(
                ) * (self.dither[1] - self.dither[0]) + self.dither[0]
            for candidate in range(np.size(self.population, 0)):
                if nfev > self.maxfun:
                    warning_flag = True
                    status_message = _status_message['maxfev']
                    break

                trial = self._mutate(candidate)
                self._ensure_constraint(trial)
                parameters = self._scale_parameters(trial)

                energy = self.func(parameters, *self.args)
                nfev += 1

                # Greedy selection: the trial replaces the candidate only
                # if its energy is strictly lower.
                if energy < self.population_energies[candidate]:
                    self.population[candidate] = trial
                    self.population_energies[candidate] = energy

                    # Keep slot 0 as the best member overall.
                    if energy < self.population_energies[0]:
                        self.population_energies[0] = energy
                        self.population[0] = trial

            # stop when the fractional s.d. of the population is less than tol
            # of the mean energy
            convergence = (np.std(self.population_energies) /
                           np.abs(np.mean(self.population_energies) +
                                  _MACHEPS))

            if self.disp:
                print("differential_evolution step %d: f(x)= %g"
                      % (nit,
                         self.population_energies[0]))

            if (self.callback and
                    self.callback(self._scale_parameters(self.population[0]),
                                  convergence=self.tol / convergence) is True):

                warning_flag = True
                status_message = ('callback function requested stop early '
                                  'by returning True')
                break

            if convergence < self.tol or warning_flag:
                break

        else:
            # for/else: the loop ran to maxiter without breaking, i.e.
            # the population never converged.
            status_message = _status_message['maxiter']
            warning_flag = True

        DE_result = OptimizeResult(
            x=self.x,
            fun=self.population_energies[0],
            nfev=nfev,
            nit=nit,
            message=status_message,
            success=(warning_flag is not True))

        if self.polish:
            # Refine the best member with a local gradient-based search.
            result = minimize(self.func,
                              np.copy(DE_result.x),
                              method='L-BFGS-B',
                              bounds=self.limits.T,
                              args=self.args)

            nfev += result.nfev
            DE_result.nfev = nfev

            if result.fun < DE_result.fun:
                DE_result.fun = result.fun
                DE_result.x = result.x
                DE_result.jac = result.jac
                # to keep internal state consistent
                self.population_energies[0] = result.fun
                self.population[0] = self._unscale_parameters(result.x)

        return DE_result
def _scale_parameters(self, trial):
"""
scale from a number between 0 and 1 to parameters
"""
return self.__scale_arg1 + (trial - 0.5) * self.__scale_arg2
def _unscale_parameters(self, parameters):
"""
scale from parameters to a number between 0 and 1.
"""
return (parameters - self.__scale_arg1) / self.__scale_arg2 + 0.5
def _ensure_constraint(self, trial):
"""
make sure the parameters lie between the limits
"""
for index, param in enumerate(trial):
if param > 1 or param < 0:
trial[index] = self.random_number_generator.rand()
    def _mutate(self, candidate):
        """
        create a trial vector based on a mutation strategy
        """
        trial = np.copy(self.population[candidate])
        parameter_count = np.size(trial, 0)

        # Starting index for the (modulo) crossover fill.
        fill_point = self.random_number_generator.randint(0, parameter_count)

        # randtobest1 strategies also need the candidate index itself.
        if (self.strategy == 'randtobest1exp'
                or self.strategy == 'randtobest1bin'):
            bprime = self.mutation_func(candidate,
                                        self._select_samples(candidate, 5))
        else:
            bprime = self.mutation_func(self._select_samples(candidate, 5))

        if self.strategy in self._binomial:
            crossovers = self.random_number_generator.rand(parameter_count)
            crossovers = crossovers < self.cross_over_probability
            # the last one is always from the bprime vector for binomial
            # If you fill in modulo with a loop you have to set the last one to
            # true. If you don't use a loop then you can have any random entry
            # be True.
            crossovers[fill_point] = True
            trial = np.where(crossovers, bprime, trial)
            return trial

        elif self.strategy in self._exponential:
            # Copy a contiguous (modulo) run of parameters from bprime,
            # stopping at the first failed crossover draw or after a full
            # wrap around the parameter vector.
            i = 0
            while (i < parameter_count and
                   self.random_number_generator.rand() <
                   self.cross_over_probability):
                trial[fill_point] = bprime[fill_point]
                fill_point = (fill_point + 1) % parameter_count
                i += 1

            return trial
def _best1(self, samples):
"""
best1bin, best1exp
"""
r0, r1 = samples[:2]
return (self.population[0] + self.scale *
(self.population[r0] - self.population[r1]))
def _rand1(self, samples):
"""
rand1bin, rand1exp
"""
r0, r1, r2 = samples[:3]
return (self.population[r0] + self.scale *
(self.population[r1] - self.population[r2]))
def _randtobest1(self, candidate, samples):
"""
randtobest1bin, randtobest1exp
"""
r0, r1 = samples[:2]
bprime = np.copy(self.population[candidate])
bprime += self.scale * (self.population[0] - bprime)
bprime += self.scale * (self.population[r0] -
self.population[r1])
return bprime
def _best2(self, samples):
"""
best2bin, best2exp
"""
r0, r1, r2, r3 = samples[:4]
bprime = (self.population[0] + self.scale *
(self.population[r0] + self.population[r1]
- self.population[r2] - self.population[r3]))
return bprime
def _rand2(self, samples):
"""
rand2bin, rand2exp
"""
r0, r1, r2, r3, r4 = samples
bprime = (self.population[r0] + self.scale *
(self.population[r1] + self.population[r2] -
self.population[r3] - self.population[r4]))
return bprime
def _select_samples(self, candidate, number_samples):
"""
obtain random integers from range(np.size(self.population, 0)),
without replacement. You can't have the original candidate either.
"""
idxs = list(range(np.size(self.population, 0)))
idxs.remove(candidate)
self.random_number_generator.shuffle(idxs)
idxs = idxs[:number_samples]
return idxs
def _make_random_gen(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
| bsd-3-clause |
wangyum/tensorflow | tensorflow/python/debug/lib/session_debug_file_test.py | 48 | 5016 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for debugger functionalities in tf.Session with file:// URLs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.debug.lib import session_debug_testlib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class SessionDebugTest(session_debug_testlib.SessionDebugTestBase):
  """Runs the shared session-debug test suite using file:// debug URLs."""

  def _debug_urls(self, run_number=None):
    # Address the dump directory via a file:// URL (the scheme under test).
    return ["file://%s" % self._debug_dump_dir(run_number=run_number)]

  def _debug_dump_dir(self, run_number=None):
    # Each numbered run gets its own subdirectory beneath the dump root.
    if run_number is None:
      return self._dump_root
    else:
      return os.path.join(self._dump_root, "run_%d" % run_number)

  def testAllowsDifferentWatchesOnDifferentRuns(self):
    """Test watching different tensors on different runs of the same graph."""
    with session.Session() as sess:
      u_init_val = [[5.0, 3.0], [-1.0, 0.0]]
      v_init_val = [[2.0], [-1.0]]

      # Use node names with overlapping namespace (i.e., parent directory) to
      # test concurrent, non-racing directory creation.
      u_name = "diff_Watch/u"
      v_name = "diff_Watch/v"

      u_init = constant_op.constant(u_init_val, shape=[2, 2])
      u = variables.Variable(u_init, name=u_name)
      v_init = constant_op.constant(v_init_val, shape=[2, 1])
      v = variables.Variable(v_init, name=v_name)

      w = math_ops.matmul(u, v, name="diff_Watch/matmul")

      u.initializer.run()
      v.initializer.run()

      for i in range(2):
        run_options = config_pb2.RunOptions(output_partition_graphs=True)

        run_dump_root = self._debug_dump_dir(run_number=i)
        debug_urls = self._debug_urls(run_number=i)

        if i == 0:
          # First debug run: Add debug tensor watch for u.
          debug_utils.add_debug_tensor_watch(
              run_options, "%s/read" % u_name, 0, debug_urls=debug_urls)
        else:
          # Second debug run: Add debug tensor watch for v.
          debug_utils.add_debug_tensor_watch(
              run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)

        run_metadata = config_pb2.RunMetadata()

        # Invoke Session.run().
        sess.run(w, options=run_options, run_metadata=run_metadata)

        self.assertEqual(self._expected_partition_graph_count,
                         len(run_metadata.partition_graphs))

        dump = debug_data.DebugDumpDir(
            run_dump_root, partition_graphs=run_metadata.partition_graphs)
        self.assertTrue(dump.loaded_partition_graphs())

        # Each run should have generated only one dumped tensor, not two.
        self.assertEqual(1, dump.size)

        if i == 0:
          self.assertAllClose([u_init_val],
                              dump.get_tensors("%s/read" % u_name, 0,
                                               "DebugIdentity"))
          self.assertGreaterEqual(
              dump.get_rel_timestamps("%s/read" % u_name, 0,
                                      "DebugIdentity")[0], 0)
        else:
          self.assertAllClose([v_init_val],
                              dump.get_tensors("%s/read" % v_name, 0,
                                               "DebugIdentity"))
          self.assertGreaterEqual(
              dump.get_rel_timestamps("%s/read" % v_name, 0,
                                      "DebugIdentity")[0], 0)
class SessionDebugConcurrentTest(
    session_debug_testlib.DebugConcurrentRunCallsTest):
  """Concurrent-run variant: one temporary dump root per concurrent run."""

  def setUp(self):
    self._num_concurrent_runs = 3
    self._dump_roots = []
    for _ in range(self._num_concurrent_runs):
      self._dump_roots.append(tempfile.mkdtemp())

  def tearDown(self):
    ops.reset_default_graph()
    # Remove the per-run dump directories created in setUp.
    for dump_root in self._dump_roots:
      if os.path.isdir(dump_root):
        shutil.rmtree(dump_root)

  def _get_concurrent_debug_urls(self):
    # One file:// debug URL per concurrent run.
    return [("file://%s" % dump_root) for dump_root in self._dump_roots]
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
rajashreer7/autotest-client-tests | linux-tools/gnome_vfs2/gnome_vfs2.py | 4 | 1254 | #!/bin/python
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error
class gnome_vfs2(test.test):

    """
    Autotest module for testing basic functionality
    of gnome_vfs2

    @author Ramesh YR, rameshyr@linux.vnet.ibm.com ##
    """
    # NOTE: this module uses Python 2 syntax (``except ..., e`` below).
    version = 1
    nfail = 0
    path = ''

    def initialize(self):
        """
        Sets the overall failure counter for the test.
        """
        self.nfail = 0
        logging.info('\n Test initialize successfully')

    def run_once(self, test_path=''):
        """
        Trigger test run
        """
        try:
            os.environ["LTPBIN"] = "%s/shared" %(test_path)
            # Run the shell-based test suite from the gnome_vfs2 directory;
            # a non-zero exit status counts as one failure.
            ret_val = subprocess.Popen(['./gnome-vfs2.sh'], cwd="%s/gnome_vfs2" %(test_path))
            ret_val.communicate()
            if ret_val.returncode != 0:
                self.nfail += 1
        except error.CmdError, e:
            self.nfail += 1
            logging.error("Test Failed: %s", e)

    def postprocess(self):
        # Raise a test error if any failure was recorded during run_once.
        if self.nfail != 0:
            logging.info('\n nfails is non-zero')
            raise error.TestError('\nTest failed')
        else:
            logging.info('\n Test completed successfully ')
browseinfo/odoo_saas3_nicolas | addons/decimal_precision/tests/test_qweb_float.py | 103 | 2000 | # -*- coding: utf-8 -*-
from openerp.tests import common
class TestFloatExport(common.TransactionCase):
    """Checks QWeb float rendering against field digits and decimal.precision
    records."""

    def setUp(self):
        super(TestFloatExport, self).setUp()
        self.Model = self.registry('decimal.precision.test')

    def get_converter(self, name):
        # Returns a callable that renders values of field ``name`` through
        # the QWeb float converter, using that field's column definition.
        converter = self.registry('ir.qweb.field.float')
        column = self.Model._all_columns[name].column
        return lambda value, options=None: converter.value_to_html(
            self.cr, self.uid, value, column, options=options, context=None)

    def test_basic_float(self):
        # Unconstrained float: rendered as-is.
        converter = self.get_converter('float')

        self.assertEqual(
            converter(42.0),
            "42.0")
        self.assertEqual(
            converter(42.12345),
            "42.12345")

        # Field with 2 digits: padded/truncated to 2 decimal places.
        converter = self.get_converter('float_2')

        self.assertEqual(
            converter(42.0),
            "42.00")
        self.assertEqual(
            converter(42.12345),
            "42.12")

        # Field with 4 digits.
        converter = self.get_converter('float_4')

        self.assertEqual(
            converter(42.0),
            '42.0000')
        self.assertEqual(
            converter(42.12345),
            '42.1234')

    def test_precision_domain(self):
        # decimal.precision records override the precision by name via the
        # ``decimal_precision`` render option.
        DP = self.registry('decimal.precision')
        DP.create(self.cr, self.uid, {
            'name': 'A',
            'digits': 2,
        })
        DP.create(self.cr, self.uid, {
            'name': 'B',
            'digits': 6,
        })

        converter = self.get_converter('float')

        self.assertEqual(
            converter(42.0, {'decimal_precision': 'A'}),
            '42.00')
        self.assertEqual(
            converter(42.0, {'decimal_precision': 'B'}),
            '42.000000')

        # The option takes precedence over the field's own digits.
        converter = self.get_converter('float_4')

        self.assertEqual(
            converter(42.12345, {'decimal_precision': 'A'}),
            '42.12')
        self.assertEqual(
            converter(42.12345, {'decimal_precision': 'B'}),
            '42.123450')
| agpl-3.0 |
nanolearningllc/edx-platform-cypress-2 | common/lib/xmodule/xmodule/modulestore/tests/test_cross_modulestore_import_export.py | 29 | 19585 | """
This suite of tests verifies that courses exported from one modulestore can be imported into
another modulestore and the result will be identical (ignoring changes to identifiers that are
the result of being imported into a course with a different course id).
It does this by providing facilities for creating and cleaning up each of the modulestore types,
and then for each combination of modulestores, performing the sequence:
1) use xml_importer to read a course from xml from disk into the first modulestore (called the source)
2) use xml_exporter to dump the course from the source modulestore to disk
3) use xml_importer to read the dumped course into a second modulestore (called the destination)
4) Compare all modules in the source and destination modulestores to make sure that they line up
"""
from contextlib import contextmanager, nested
import itertools
import os
from path import Path as path
import random
from shutil import rmtree
from tempfile import mkdtemp
import ddt
from nose.plugins.attrib import attr
from mock import patch
from xmodule.tests import CourseComparisonTest
from xmodule.modulestore.mongo.base import ModuleStoreEnum
from xmodule.modulestore.mongo.draft import DraftModuleStore
from xmodule.modulestore.mixed import MixedModuleStore
from xmodule.contentstore.mongo import MongoContentStore
from xmodule.modulestore.xml_importer import import_course_from_xml
from xmodule.modulestore.xml_exporter import export_course_to_xml
from xmodule.modulestore.split_mongo.split_draft import DraftVersioningModuleStore
from xmodule.modulestore.tests.mongo_connection import MONGO_PORT_NUM, MONGO_HOST
from xmodule.modulestore.tests.utils import mock_tab_from_json
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.partitions.tests.test_partitions import PartitionTestCase
from xmodule.x_module import XModuleMixin
from xmodule.modulestore.xml import XMLModuleStore
TEST_DATA_DIR = 'common/test/data/'
COMMON_DOCSTORE_CONFIG = {
'host': MONGO_HOST,
'port': MONGO_PORT_NUM,
}
DATA_DIR = path(__file__).dirname().parent.parent / "tests" / "data" / "xml-course-root"
XBLOCK_MIXINS = (InheritanceMixin, XModuleMixin)
class MemoryCache(object):
    """
    In-memory stand-in for the metadata_inheritance_cache_subsystem
    interface used by the modulestore.

    Provides the same ``get``/``set`` API, backed by a plain dict.
    """
    def __init__(self):
        self._data = {}

    def get(self, key, default=None):
        """
        Return the cached value for ``key``, or ``default`` if the key
        has never been set.
        """
        try:
            return self._data[key]
        except KeyError:
            return default

    def set(self, key, value):
        """
        Store ``value`` under ``key``, overwriting any previous entry.
        """
        self._data[key] = value
class MongoContentstoreBuilder(object):
    """
    A builder class for a MongoContentStore.
    """
    @contextmanager
    def build(self):
        """
        A contextmanager that returns a MongoContentStore, and deletes its contents
        when the context closes.
        """
        # Randomized db name isolates concurrent test runs from each other.
        contentstore = MongoContentStore(
            db='contentstore{}'.format(random.randint(0, 10000)),
            collection='content',
            **COMMON_DOCSTORE_CONFIG
        )
        contentstore.ensure_indexes()

        try:
            yield contentstore
        finally:
            # Delete the created database
            contentstore._drop_database()  # pylint: disable=protected-access

    def __repr__(self):
        return 'MongoContentstoreBuilder()'
class StoreBuilderBase(object):
    """
    Base class for all modulestore builders.

    Subclasses supply ``build_with_contentstore``; this base adds the
    convenience entry points that optionally create the contentstore too.
    """
    @contextmanager
    def build(self, **kwargs):
        """
        Build the modulstore, optionally building the contentstore as well.
        """
        contentstore = kwargs.pop('contentstore', None)
        if not contentstore:
            # NOTE(review): the two branches yield different shapes — a
            # (contentstore, modulestore) pair here, a bare modulestore
            # below. Callers must match the shape to whether they passed
            # `contentstore`.
            with self.build_without_contentstore() as (contentstore, modulestore):
                yield contentstore, modulestore
        else:
            with self.build_with_contentstore(contentstore) as modulestore:
                yield modulestore

    @contextmanager
    def build_without_contentstore(self):
        """
        Build both the contentstore and the modulestore.
        """
        with MongoContentstoreBuilder().build() as contentstore:
            with self.build_with_contentstore(contentstore) as modulestore:
                yield contentstore, modulestore
class MongoModulestoreBuilder(StoreBuilderBase):
    """
    A builder class for a DraftModuleStore.
    """
    @contextmanager
    def build_with_contentstore(self, contentstore):
        """
        A contextmanager that returns an isolated mongo modulestore, and then deletes
        all of its data at the end of the context.

        Args:
            contentstore: The contentstore that this modulestore should use to store
                all of its assets.
        """
        # Randomized db name isolates concurrent test runs from each other.
        doc_store_config = dict(
            db='modulestore{}'.format(random.randint(0, 10000)),
            collection='xmodule',
            asset_collection='asset_metadata',
            **COMMON_DOCSTORE_CONFIG
        )

        # Set up a temp directory for storing filesystem content created during import
        fs_root = mkdtemp()

        # pylint: disable=attribute-defined-outside-init
        modulestore = DraftModuleStore(
            contentstore,
            doc_store_config,
            fs_root,
            render_template=repr,
            branch_setting_func=lambda: ModuleStoreEnum.Branch.draft_preferred,
            metadata_inheritance_cache_subsystem=MemoryCache(),
            xblock_mixins=XBLOCK_MIXINS,
        )
        modulestore.ensure_indexes()

        try:
            yield modulestore
        finally:
            # Delete the created database
            modulestore._drop_database()  # pylint: disable=protected-access

            # Delete the created directory on the filesystem
            rmtree(fs_root, ignore_errors=True)

    def __repr__(self):
        return 'MongoModulestoreBuilder()'
class VersioningModulestoreBuilder(StoreBuilderBase):
    """
    A builder class for a VersioningModuleStore.
    """
    @contextmanager
    def build_with_contentstore(self, contentstore):
        """
        A contextmanager that returns an isolated versioning modulestore, and then deletes
        all of its data at the end of the context.

        Args:
            contentstore: The contentstore that this modulestore should use to store
                all of its assets.
        """
        # pylint: disable=unreachable
        # Randomized db name isolates concurrent test runs from each other.
        doc_store_config = dict(
            db='modulestore{}'.format(random.randint(0, 10000)),
            collection='split_module',
            **COMMON_DOCSTORE_CONFIG
        )
        # Set up a temp directory for storing filesystem content created during import
        fs_root = mkdtemp()

        modulestore = DraftVersioningModuleStore(
            contentstore,
            doc_store_config,
            fs_root,
            render_template=repr,
            xblock_mixins=XBLOCK_MIXINS,
        )
        modulestore.ensure_indexes()

        try:
            yield modulestore
        finally:
            # Delete the created database
            modulestore._drop_database()  # pylint: disable=protected-access

            # Delete the created directory on the filesystem
            rmtree(fs_root, ignore_errors=True)

    def __repr__(self):
        return 'SplitModulestoreBuilder()'
class XmlModulestoreBuilder(StoreBuilderBase):
    """
    A builder class for a XMLModuleStore.
    """
    # pylint: disable=unused-argument
    @contextmanager
    def build_with_contentstore(self, contentstore=None, course_ids=None):
        """
        A contextmanager that returns an isolated xml modulestore

        Args:
            contentstore: The contentstore that this modulestore should use to store
                all of its assets.
        """
        modulestore = XMLModuleStore(
            DATA_DIR,
            course_ids=course_ids,
            default_class='xmodule.hidden_module.HiddenDescriptor',
            xblock_mixins=XBLOCK_MIXINS,
        )

        # Unlike the other builders, no teardown is performed here.
        yield modulestore
class MixedModulestoreBuilder(StoreBuilderBase):
    """
    A builder class for a MixedModuleStore.
    """
    def __init__(self, store_builders, mappings=None):
        """
        Args:
            store_builders: A list of modulestore builder objects. These will be instantiated, in order,
                as the backing stores for the MixedModuleStore.
            mappings: Any course mappings to pass to the MixedModuleStore on instantiation.
        """
        self.store_builders = store_builders
        self.mappings = mappings or {}
        self.mixed_modulestore = None

    @contextmanager
    def build_with_contentstore(self, contentstore):
        """
        A contextmanager that returns a mixed modulestore built on top of modulestores
        generated by other builder classes.

        Args:
            contentstore: The contentstore that this modulestore should use to store
                all of its assets.
        """
        names, generators = zip(*self.store_builders)

        with nested(*(gen.build_with_contentstore(contentstore) for gen in generators)) as modulestores:
            # Make the modulestore creation function just return the already-created modulestores
            store_iterator = iter(modulestores)
            # Python 2 iterator protocol (``.next()``).
            create_modulestore_instance = lambda *args, **kwargs: store_iterator.next()

            # Generate a fake list of stores to give the already generated stores appropriate names
            stores = [{'NAME': name, 'ENGINE': 'This space deliberately left blank'} for name in names]

            self.mixed_modulestore = MixedModuleStore(
                contentstore,
                self.mappings,
                stores,
                create_modulestore_instance=create_modulestore_instance,
                xblock_mixins=XBLOCK_MIXINS,
            )

            yield self.mixed_modulestore

    def __repr__(self):
        return 'MixedModulestoreBuilder({!r}, {!r})'.format(self.store_builders, self.mappings)

    def asset_collection(self):
        """
        Returns the collection storing the asset metadata.

        Only meaningful when exactly one store backs the mixed store;
        otherwise None is returned.
        """
        all_stores = self.mixed_modulestore.modulestores
        if len(all_stores) > 1:
            return None

        store = all_stores[0]
        if hasattr(store, 'asset_collection'):
            # Mongo modulestore beneath mixed.
            # Returns the entire collection with *all* courses' asset metadata.
            return store.asset_collection
        else:
            # Split modulestore beneath mixed.
            # Split stores all asset metadata in the structure collection.
            return store.db_connection.structures
# Pre-built builder instances used to parametrize the cross-store tests below.
MIXED_MODULESTORE_BOTH_SETUP = MixedModulestoreBuilder([
    ('draft', MongoModulestoreBuilder()),
    ('split', VersioningModulestoreBuilder())
])
DRAFT_MODULESTORE_SETUP = MixedModulestoreBuilder([('draft', MongoModulestoreBuilder())])
SPLIT_MODULESTORE_SETUP = MixedModulestoreBuilder([('split', VersioningModulestoreBuilder())])
# Mixed-store configurations and their short display names; the two tuples
# must stay index-aligned (they are zipped together further down).
MIXED_MODULESTORE_SETUPS = (
    DRAFT_MODULESTORE_SETUP,
    SPLIT_MODULESTORE_SETUP,
)
MIXED_MS_SETUPS_SHORT = (
    'mixed_mongo',
    'mixed_split',
)
# Direct (non-mixed) store configurations and their short names.
DIRECT_MODULESTORE_SETUPS = (
    MongoModulestoreBuilder(),
    # VersioningModulestoreBuilder(),  # FUTUREDO: LMS-11227
)
DIRECT_MS_SETUPS_SHORT = (
    'mongo',
    #'split',
)
# Combined setup/name sequences; SHORT_NAME_MAP relies on their alignment.
MODULESTORE_SETUPS = DIRECT_MODULESTORE_SETUPS + MIXED_MODULESTORE_SETUPS
MODULESTORE_SHORTNAMES = DIRECT_MS_SETUPS_SHORT + MIXED_MS_SETUPS_SHORT
SHORT_NAME_MAP = dict(zip(MODULESTORE_SETUPS, MODULESTORE_SHORTNAMES))
CONTENTSTORE_SETUPS = (MongoContentstoreBuilder(),)
# Names of course fixtures (under TEST_DATA_DIR) used by the round-trip test.
COURSE_DATA_NAMES = (
    'toy',
    'manual-testing-complete',
    'split_test_module',
    'split_test_module_draft',
)
# Directory name used when exporting a course before re-importing it.
EXPORTED_COURSE_DIR_NAME = 'exported_source_course'
@ddt.ddt
@attr('mongo')
class CrossStoreXMLRoundtrip(CourseComparisonTest, PartitionTestCase):
    """
    This class exists to test XML import and export between different modulestore
    classes.
    """
    def setUp(self):
        # Each test gets a private scratch directory for exports; the cleanup
        # is registered immediately so it runs even if the test fails.
        super(CrossStoreXMLRoundtrip, self).setUp()
        self.export_dir = mkdtemp()
        self.addCleanup(rmtree, self.export_dir, ignore_errors=True)
    @patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
    @ddt.data(*itertools.product(
        MODULESTORE_SETUPS,
        MODULESTORE_SETUPS,
        CONTENTSTORE_SETUPS,
        CONTENTSTORE_SETUPS,
        COURSE_DATA_NAMES,
    ))
    @ddt.unpack
    def test_round_trip(
            self, source_builder, dest_builder, source_content_builder,
            dest_content_builder, course_data_name, _mock_tab_from_json
    ):
        """Import a course, export it, re-import into another store and compare."""
        # Construct the contentstore for storing the first import
        with source_content_builder.build() as source_content:
            # Construct the modulestore for storing the first import (using the previously created contentstore)
            with source_builder.build(contentstore=source_content) as source_store:
                # Construct the contentstore for storing the second import
                with dest_content_builder.build() as dest_content:
                    # Construct the modulestore for storing the second import (using the second contentstore)
                    with dest_builder.build(contentstore=dest_content) as dest_store:
                        source_course_key = source_store.make_course_key('a', 'course', 'course')
                        dest_course_key = dest_store.make_course_key('a', 'course', 'course')
                        import_course_from_xml(
                            source_store,
                            'test_user',
                            TEST_DATA_DIR,
                            source_dirs=[course_data_name],
                            static_content_store=source_content,
                            target_id=source_course_key,
                            raise_on_failure=True,
                            create_if_not_present=True,
                        )
                        export_course_to_xml(
                            source_store,
                            source_content,
                            source_course_key,
                            self.export_dir,
                            EXPORTED_COURSE_DIR_NAME,
                        )
                        import_course_from_xml(
                            dest_store,
                            'test_user',
                            self.export_dir,
                            source_dirs=[EXPORTED_COURSE_DIR_NAME],
                            static_content_store=dest_content,
                            target_id=dest_course_key,
                            raise_on_failure=True,
                            create_if_not_present=True,
                        )
                        # NOT CURRENTLY USED
                        # export_course_to_xml(
                        #     dest_store,
                        #     dest_content,
                        #     dest_course_key,
                        #     self.export_dir,
                        #     'exported_dest_course',
                        # )
                        # Fields/keys below are store-specific bookkeeping and
                        # legitimately differ across backends, so exclude them
                        # from the equality comparison.
                        self.exclude_field(None, 'wiki_slug')
                        self.exclude_field(None, 'xml_attributes')
                        self.exclude_field(None, 'parent')
                        self.ignore_asset_key('_id')
                        self.ignore_asset_key('uploadDate')
                        self.ignore_asset_key('content_son')
                        self.ignore_asset_key('thumbnail_location')
                        self.assertCoursesEqual(
                            source_store,
                            source_course_key,
                            dest_store,
                            dest_course_key,
                        )
                        self.assertAssetsEqual(
                            source_content,
                            source_course_key,
                            dest_content,
                            dest_course_key,
                        )
                        self.assertAssetsMetadataEqual(
                            source_store,
                            source_course_key,
                            dest_store,
                            dest_course_key,
                        )
    def test_split_course_export_import(self):
        """Round-trip a split course and check its exported policy layout."""
        # Construct the contentstore for storing the first import
        with MongoContentstoreBuilder().build() as source_content:
            # Construct the modulestore for storing the first import (using the previously created contentstore)
            with SPLIT_MODULESTORE_SETUP.build(contentstore=source_content) as source_store:
                # Construct the contentstore for storing the second import
                with MongoContentstoreBuilder().build() as dest_content:
                    # Construct the modulestore for storing the second import (using the second contentstore)
                    with SPLIT_MODULESTORE_SETUP.build(contentstore=dest_content) as dest_store:
                        source_course_key = source_store.make_course_key('a', 'source', '2015_Fall')
                        dest_course_key = dest_store.make_course_key('a', 'dest', '2015_Fall')
                        import_course_from_xml(
                            source_store,
                            'test_user',
                            TEST_DATA_DIR,
                            source_dirs=['split_course_with_static_tabs'],
                            static_content_store=source_content,
                            target_id=source_course_key,
                            raise_on_failure=True,
                            create_if_not_present=True,
                        )
                        export_course_to_xml(
                            source_store,
                            source_content,
                            source_course_key,
                            self.export_dir,
                            EXPORTED_COURSE_DIR_NAME,
                        )
                        source_course = source_store.get_course(source_course_key, depth=None, lazy=False)
                        self.assertEqual(source_course.url_name, 'course')
                        # The exported tree must contain a policy.json under
                        # policies/<url_name>/ for the course to re-import.
                        export_dir_path = path(self.export_dir)
                        policy_dir = export_dir_path / 'exported_source_course' / 'policies' / source_course.url_name
                        policy_path = policy_dir / 'policy.json'
                        self.assertTrue(os.path.exists(policy_path))
                        import_course_from_xml(
                            dest_store,
                            'test_user',
                            self.export_dir,
                            source_dirs=[EXPORTED_COURSE_DIR_NAME],
                            static_content_store=dest_content,
                            target_id=dest_course_key,
                            raise_on_failure=True,
                            create_if_not_present=True,
                        )
                        dest_course = dest_store.get_course(dest_course_key, depth=None, lazy=False)
                        self.assertEqual(dest_course.url_name, 'course')
| agpl-3.0 |
miyakz1192/neutron | neutron/db/agents_db.py | 9 | 14028 | # Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet import greenthread
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import oslo_messaging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import sqlalchemy as sa
from sqlalchemy.orm import exc
from sqlalchemy import sql
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import agent as ext_agent
from neutron.i18n import _LE, _LW
from neutron import manager
# Module-level logger.
LOG = logging.getLogger(__name__)
# Config options governing agent liveness detection ('agent_down_time') and
# which resource count a DHCP agent reports as its load ('dhcp_load_type').
AGENT_OPTS = [
    cfg.IntOpt('agent_down_time', default=75,
               help=_("Seconds to regard the agent is down; should be at "
                      "least twice report_interval, to be sure the "
                      "agent is down for good.")),
    cfg.StrOpt('dhcp_load_type', default='networks',
               choices=['networks', 'subnets', 'ports'],
               help=_('Representing the resource type whose load is being '
                      'reported by the agent. This can be "networks", '
                      '"subnets" or "ports". '
                      'When specified (Default is networks), the server will '
                      'extract particular load sent as part of its agent '
                      'configuration object from the agent report state, '
                      'which is the number of resources being consumed, at '
                      'every report_interval.'
                      'dhcp_load_type can be used in combination with '
                      'network_scheduler_driver = '
                      'neutron.scheduler.dhcp_agent_scheduler.WeightScheduler '
                      'When the network_scheduler_driver is WeightScheduler, '
                      'dhcp_load_type can be configured to represent the '
                      'choice for the resource being balanced. '
                      'Example: dhcp_load_type=networks')),
]
cfg.CONF.register_opts(AGENT_OPTS)
class Agent(model_base.BASEV2, models_v2.HasId):
    """Represents agents running in neutron deployments."""
    # Exactly one row per (agent_type, host) pair; create_or_update_agent in
    # AgentDbMixin relies on this DB-level constraint to resolve concurrent
    # registrations of the same agent.
    __table_args__ = (
        sa.UniqueConstraint('agent_type', 'host',
                            name='uniq_agents0agent_type0host'),
        model_base.BASEV2.__table_args__
    )
    # L3 agent, DHCP agent, OVS agent, LinuxBridge
    agent_type = sa.Column(sa.String(255), nullable=False)
    binary = sa.Column(sa.String(255), nullable=False)
    # TOPIC is a fanout exchange topic
    topic = sa.Column(sa.String(255), nullable=False)
    # TOPIC.host is a target topic
    host = sa.Column(sa.String(255), nullable=False)
    admin_state_up = sa.Column(sa.Boolean, default=True,
                               server_default=sql.true(), nullable=False)
    # the time when first report came from agents
    created_at = sa.Column(sa.DateTime, nullable=False)
    # the time when first report came after agents start
    started_at = sa.Column(sa.DateTime, nullable=False)
    # updated when agents report
    heartbeat_timestamp = sa.Column(sa.DateTime, nullable=False)
    # description is note for admin user
    description = sa.Column(sa.String(255))
    # configurations: a json dict string, I think 4095 is enough
    configurations = sa.Column(sa.String(4095), nullable=False)
    # load - number of resources hosted by the agent
    load = sa.Column(sa.Integer, server_default='0', nullable=False)
    @property
    def is_active(self):
        """True when the last heartbeat is within the agent_down_time window."""
        return not AgentDbMixin.is_agent_down(self.heartbeat_timestamp)
class AgentDbMixin(ext_agent.AgentPluginBase):
    """Mixin class to add agent extension to db_base_plugin_v2."""
    def _get_agent(self, context, id):
        """Return the Agent DB object with the given id.

        Raises AgentNotFound when no such row exists.
        """
        try:
            agent = self._get_by_id(context, Agent, id)
        except exc.NoResultFound:
            raise ext_agent.AgentNotFound(id=id)
        return agent
    def get_enabled_agent_on_host(self, context, agent_type, host):
        """Return agent of agent_type for the specified host."""
        # Only agents with admin_state_up are considered; returns None when no
        # enabled agent of this type is registered on the host. A stale agent
        # (missed heartbeat) is still returned, with a warning logged.
        query = context.session.query(Agent)
        query = query.filter(Agent.agent_type == agent_type,
                             Agent.host == host,
                             Agent.admin_state_up == sql.true())
        try:
            agent = query.one()
        except exc.NoResultFound:
            LOG.debug('No enabled %(agent_type)s agent on host '
                      '%(host)s', {'agent_type': agent_type, 'host': host})
            return
        if self.is_agent_down(agent.heartbeat_timestamp):
            LOG.warn(_LW('%(agent_type)s agent %(agent_id)s is not active'),
                     {'agent_type': agent_type, 'agent_id': agent.id})
        return agent
    @classmethod
    def is_agent_down(cls, heart_beat_time):
        """Return True if the heartbeat is older than CONF.agent_down_time."""
        return timeutils.is_older_than(heart_beat_time,
                                       cfg.CONF.agent_down_time)
    def get_configuration_dict(self, agent_db):
        """Deserialize the agent's JSON 'configurations' column into a dict.

        Returns an empty dict (and logs a warning) when the stored value is
        not valid JSON.
        """
        try:
            conf = jsonutils.loads(agent_db.configurations)
        except Exception:
            msg = _LW('Configuration for agent %(agent_type)s on host %(host)s'
                      ' is invalid.')
            LOG.warn(msg, {'agent_type': agent_db.agent_type,
                           'host': agent_db.host})
            conf = {}
        return conf
    def _get_agent_load(self, agent):
        """Compute the load reported by an agent.

        Only DHCP agents report a load here; the counted resource is selected
        by the dhcp_load_type option. All other agent types yield 0.
        """
        configs = agent.get('configurations', {})
        load_type = None
        load = 0
        if(agent['agent_type'] == constants.AGENT_TYPE_DHCP):
            load_type = cfg.CONF.dhcp_load_type
        if load_type:
            load = int(configs.get(load_type, 0))
        return load
    def _make_agent_dict(self, agent, fields=None):
        """Convert an Agent DB object into an API-style response dict."""
        attr = ext_agent.RESOURCE_ATTRIBUTE_MAP.get(
            ext_agent.RESOURCE_NAME + 's')
        # 'alive' and 'configurations' are derived values rather than copied
        # verbatim from the DB row.
        res = dict((k, agent[k]) for k in attr
                   if k not in ['alive', 'configurations'])
        res['alive'] = not AgentDbMixin.is_agent_down(
            res['heartbeat_timestamp'])
        res['configurations'] = self.get_configuration_dict(agent)
        return self._fields(res, fields)
    def delete_agent(self, context, id):
        """Delete the agent row with the given id (raises AgentNotFound)."""
        with context.session.begin(subtransactions=True):
            agent = self._get_agent(context, id)
            context.session.delete(agent)
    def update_agent(self, context, id, agent):
        """Update an agent from an API payload of the form {'agent': {...}}."""
        agent_data = agent['agent']
        with context.session.begin(subtransactions=True):
            agent = self._get_agent(context, id)
            agent.update(agent_data)
            return self._make_agent_dict(agent)
    def get_agents_db(self, context, filters=None):
        """Return raw Agent DB objects matching the filters."""
        query = self._get_collection_query(context, Agent, filters=filters)
        return query.all()
    def get_agents(self, context, filters=None, fields=None):
        """Return agent dicts, post-filtering on the derived 'alive' flag."""
        agents = self._get_collection(context, Agent,
                                      self._make_agent_dict,
                                      filters=filters, fields=fields)
        alive = filters and filters.get('alive', None)
        if alive:
            # alive filter will be a list
            alive = attributes.convert_to_boolean(alive[0])
            agents = [agent for agent in agents if agent['alive'] == alive]
        return agents
    def _get_agent_by_type_and_host(self, context, agent_type, host):
        """Return the unique agent for (agent_type, host).

        Raises AgentNotFoundByTypeHost when absent and
        MultipleAgentFoundByTypeHost when the pair is ambiguous.
        """
        query = self._model_query(context, Agent)
        try:
            agent_db = query.filter(Agent.agent_type == agent_type,
                                    Agent.host == host).one()
            return agent_db
        except exc.NoResultFound:
            raise ext_agent.AgentNotFoundByTypeHost(agent_type=agent_type,
                                                    host=host)
        except exc.MultipleResultsFound:
            raise ext_agent.MultipleAgentFoundByTypeHost(agent_type=agent_type,
                                                         host=host)
    def get_agent(self, context, id, fields=None):
        """Return a single agent dict by id."""
        agent = self._get_agent(context, id)
        return self._make_agent_dict(agent, fields)
    def _create_or_update_agent(self, context, agent):
        """Insert or refresh the DB row for a reporting agent.

        The greenthread.sleep(0) calls yield control so a burst of agent
        reports does not starve other eventlet greenthreads.
        """
        with context.session.begin(subtransactions=True):
            res_keys = ['agent_type', 'binary', 'host', 'topic']
            res = dict((k, agent[k]) for k in res_keys)
            configurations_dict = agent.get('configurations', {})
            res['configurations'] = jsonutils.dumps(configurations_dict)
            res['load'] = self._get_agent_load(agent)
            current_time = timeutils.utcnow()
            try:
                agent_db = self._get_agent_by_type_and_host(
                    context, agent['agent_type'], agent['host'])
                res['heartbeat_timestamp'] = current_time
                if agent.get('start_flag'):
                    # start_flag marks the first report after an agent restart.
                    res['started_at'] = current_time
                greenthread.sleep(0)
                agent_db.update(res)
            except ext_agent.AgentNotFoundByTypeHost:
                greenthread.sleep(0)
                res['created_at'] = current_time
                res['started_at'] = current_time
                res['heartbeat_timestamp'] = current_time
                res['admin_state_up'] = True
                agent_db = Agent(**res)
                greenthread.sleep(0)
                context.session.add(agent_db)
            greenthread.sleep(0)
    def create_or_update_agent(self, context, agent):
        """Create or update agent according to report."""
        try:
            return self._create_or_update_agent(context, agent)
        except db_exc.DBDuplicateEntry:
            # It might happen that two or more concurrent transactions
            # are trying to insert new rows having the same value of
            # (agent_type, host) pair at the same time (if there has
            # been no such entry in the table and multiple agent status
            # updates are being processed at the moment). In this case
            # having a unique constraint on (agent_type, host) columns
            # guarantees that only one transaction will succeed and
            # insert a new agent entry, others will fail and be rolled
            # back. That means we must retry them one more time: no
            # INSERTs will be issued, because
            # _get_agent_by_type_and_host() will return the existing
            # agent entry, which will be updated multiple times
            return self._create_or_update_agent(context, agent)
class AgentExtRpcCallback(object):
    """Processes the rpc report in plugin implementations.
    This class implements the server side of an rpc interface. The client side
    can be found in neutron.agent.rpc.PluginReportStateAPI. For more
    information on changing rpc interfaces, see doc/source/devref/rpc_api.rst.
    """
    target = oslo_messaging.Target(version='1.0',
                                   namespace=constants.RPC_NAMESPACE_STATE)
    # Server process start time; reports timestamped before this are treated
    # as stale queued messages and dropped.
    START_TIME = timeutils.utcnow()
    def __init__(self, plugin=None):
        # The plugin may be injected; otherwise it is resolved lazily on the
        # first report (see report_state).
        super(AgentExtRpcCallback, self).__init__()
        self.plugin = plugin
    def report_state(self, context, **kwargs):
        """Report state from agent to server."""
        time = kwargs['time']
        time = timeutils.parse_strtime(time)
        agent_state = kwargs['agent_state']['agent_state']
        self._check_clock_sync_on_agent_start(agent_state, time)
        if self.START_TIME > time:
            # Drop messages that were queued before this server started.
            time_agent = timeutils.isotime(time)
            time_server = timeutils.isotime(self.START_TIME)
            log_dict = {'agent_time': time_agent, 'server_time': time_server}
            LOG.debug("Stale message received with timestamp: %(agent_time)s. "
                      "Skipping processing because it's older than the "
                      "server start timestamp: %(server_time)s", log_dict)
            return
        if not self.plugin:
            self.plugin = manager.NeutronManager.get_plugin()
        self.plugin.create_or_update_agent(context, agent_state)
    def _check_clock_sync_on_agent_start(self, agent_state, agent_time):
        """Checks if the server and the agent times are in sync.
        Method checks if the agent time is in sync with the server time
        on start up. Ignores it, on subsequent re-connects.
        """
        if agent_state.get('start_flag'):
            time_server_now = timeutils.utcnow()
            diff = abs((time_server_now - agent_time).seconds)
            if diff > cfg.CONF.agent_down_time:
                agent_name = agent_state['agent_type']
                time_agent = timeutils.isotime(agent_time)
                host = agent_state['host']
                log_dict = {'host': host,
                            'agent_name': agent_name,
                            'agent_time': time_agent,
                            'threshold': cfg.CONF.agent_down_time,
                            'serv_time': timeutils.isotime(time_server_now)}
                LOG.error(_LE("Message received from the host: %(host)s "
                              "during the registration of %(agent_name)s has "
                              "a timestamp: %(agent_time)s. This differs from "
                              "the current server timestamp: %(serv_time)s by "
                              "more than the threshold agent down"
                              "time: %(threshold)s."), log_dict)
| apache-2.0 |
andmos/ansible | lib/ansible/modules/cloud/google/gcp_compute_network_facts.py | 9 | 7157 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_network_facts
description:
- Gather facts for GCP Network
short_description: Gather facts for GCP Network
version_added: 2.7
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
filters:
description:
- A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters.)
- Each additional filter in the list will act be added as an AND condition (filter1
and filter2) .
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: a network facts
gcp_compute_network_facts:
filters:
- name = test_object
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
returned: success
type: str
gateway_ipv4:
description:
- A gateway address for default routing to other networks. This value is read
only and is selected by the Google Compute Engine, typically as the first
usable address in the IPv4Range.
returned: success
type: str
id:
description:
- The unique identifier for the resource.
returned: success
type: int
ipv4_range:
description:
- 'The range of internal addresses that are legal on this network. This range
is a CIDR specification, for example: 192.168.0.0/16. Provided by the client
when the network is created.'
returned: success
type: str
name:
description:
- Name of the resource. Provided by the client when the resource is created.
The name must be 1-63 characters long, and comply with RFC1035. Specifically,
the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
subnetworks:
description:
- Server-defined fully-qualified URLs for all subnetworks in this network.
returned: success
type: list
autoCreateSubnetworks:
description:
- When set to true, the network is created in "auto subnet mode". When set to
false, the network is in "custom subnet mode".
- In "auto subnet mode", a newly created network is assigned the default CIDR
of 10.128.0.0/9 and it automatically creates one subnetwork per region.
returned: success
type: bool
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
routingConfig:
description:
- The network-level routing configuration for this network. Used by Cloud Router
to determine what type of network-wide routing behavior to enforce.
returned: success
type: complex
contains:
routingMode:
description:
- The network-wide routing mode to use. If set to REGIONAL, this network's
cloud routers will only advertise routes with subnetworks of this network
in the same region as the router. If set to GLOBAL, this network's cloud
routers will advertise routes with all subnetworks of this network, across
regions.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
    """Module entry point: list GCP networks matching the filters and exit.

    Builds the Ansible module, defaults the OAuth scope, queries the
    networks collection and reports the result via exit_json.
    """
    module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str')))
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
    response = fetch_list(module, collection(module), query_options(module.params['filters']))
    # fetch_list returns None for 404/204 responses (see return_if_object);
    # the previous code called .get() on that None and crashed with
    # AttributeError. Treat None the same as an empty result.
    items = response.get('items') if response else None
    if not items:
        items = []
    return_value = {'items': items}
    module.exit_json(**return_value)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/global/networks".format(**module.params)
def fetch_list(module, link, query):
    """GET ``link`` with the gcloud filter ``query`` and return the parsed body."""
    session = GcpSession(module, 'compute')
    reply = session.get(link, params={'filter': query})
    return return_if_object(module, reply)
def query_options(filters):
    """Combine gcloud-style filter strings into a single query expression.

    Multiple filters are ANDed together by space-separating them; each one is
    wrapped in parentheses unless it is already fully parenthesized.

    Args:
        filters: list of filter strings; may be None or empty.

    Returns:
        A single filter string ('' when no filters were given).
    """
    if not filters:
        return ''
    if len(filters) == 1:
        return filters[0]
    queries = []
    for f in filters:
        # For multiple queries, every query must carry parentheses. The old
        # check used `f[0] != '(' and f[-1] != ')'`, which (a) raised
        # IndexError on an empty string and (b) skipped wrapping strings that
        # had only one of the two parentheses; also `''.join(f)` on a string
        # was a no-op. Require BOTH a leading '(' and a trailing ')'.
        if f.startswith('(') and f.endswith(')'):
            queries.append(f)
        else:
            queries.append("(%s)" % f)
    return ' '.join(queries)
def return_if_object(module, response):
    """Decode ``response`` into a dict, or None for 404/204; fail on errors.

    Calls module.fail_json when the body is not valid JSON or when the
    payload carries a GCP error list.
    """
    # "Not found" and "no content" both mean there is nothing to return.
    if response.status_code in (404, 204):
        return None
    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as err:
        module.fail_json(msg="Invalid JSON response with error: %s" % err)
    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
    return result
# Execute only when run as a script (Ansible invokes the module this way).
if __name__ == "__main__":
    main()
| gpl-3.0 |
qian19876025/nixysa | third_party/ply-3.1/test/yacc_rr.py | 174 | 1631 | # -----------------------------------------------------------------------------
# yacc_rr.py
#
# A grammar with a reduce/reduce conflict
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.yacc as yacc
from calclex import tokens
# Parsing rules
# Operator precedence, lowest to highest; unary minus (UMINUS) binds tightest.
precedence = (
    ('left','PLUS','MINUS'),
    ('left','TIMES','DIVIDE'),
    ('right','UMINUS'),
)
# dictionary of names (global symbol table for assignments)
names = { }
def p_statement_assign(t):
    'statement : NAME EQUALS expression'
    # Store the evaluated expression under the given name.
    names[t[1]] = t[3]
def p_statement_assign_2(t):
    'statement : NAME EQUALS NUMBER'
    # Overlaps with p_statement_assign (a NUMBER is also an expression); this
    # deliberate duplication produces the reduce/reduce conflict under test.
    names[t[1]] = t[3]
def p_statement_expr(t):
    'statement : expression'
    # A bare expression statement prints its value.
    print(t[1])
def p_expression_binop(t):
    '''expression : expression PLUS expression
                  | expression MINUS expression
                  | expression TIMES expression
                  | expression DIVIDE expression'''
    # NOTE: the docstring above is the ply grammar specification and is kept
    # verbatim -- changing it would change the parser.
    left, op, right = t[1], t[2], t[3]
    if op == '+':
        t[0] = left + right
    elif op == '-':
        t[0] = left - right
    elif op == '*':
        t[0] = left * right
    elif op == '/':
        t[0] = left / right
def p_expression_uminus(t):
    'expression : MINUS expression %prec UMINUS'
    # %prec UMINUS gives unary minus higher precedence than binary MINUS.
    t[0] = -t[2]
def p_expression_group(t):
    'expression : LPAREN expression RPAREN'
    # A parenthesized expression evaluates to its inner expression.
    t[0] = t[2]
def p_expression_number(t):
    'expression : NUMBER'
    # A numeric literal carries its token value directly.
    t[0] = t[1]
def p_expression_name(t):
    'expression : NAME'
    # Resolve the identifier against the global symbol table; undefined
    # names evaluate to 0 after printing a diagnostic.
    try:
        t[0] = names[t[1]]
    except LookupError:
        print("Undefined name '%s'" % t[1])
        t[0] = 0
def p_error(t):
    # Invoked by ply when a token matches no grammar rule.
    print("Syntax error at '%s'" % t.value)
# Build the parser; table generation reports the expected reduce/reduce conflict.
yacc.yacc()
| apache-2.0 |
wemanuel/smry | smry/server-auth/ls/google-cloud-sdk/.install/.backup/lib/protorpc/protourlencode.py | 11 | 18448 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""URL encoding support for messages types.
Protocol support for URL encoded form parameters.
Nested Fields:
Nested fields are repesented by dot separated names. For example, consider
the following messages:
class WebPage(Message):
title = StringField(1)
tags = StringField(2, repeated=True)
class WebSite(Message):
name = StringField(1)
home = MessageField(WebPage, 2)
pages = MessageField(WebPage, 3, repeated=True)
And consider the object:
page = WebPage()
page.title = 'Welcome to NewSite 2010'
site = WebSite()
site.name = 'NewSite 2010'
site.home = page
The URL encoded representation of this constellation of objects is.
name=NewSite+2010&home.title=Welcome+to+NewSite+2010
An object that exists but does not have any state can be represented with
a reference to its name alone with no value assigned to it. For example:
page = WebSite()
page.name = 'My Empty Site'
page.home = WebPage()
is represented as:
name=My+Empty+Site&home=
This represents a site with an empty uninitialized home page.
Repeated Fields:
Repeated fields are represented by the name of and the index of each value
separated by a dash. For example, consider the following message:
home = Page()
home.title = 'Nome'
news = Page()
news.title = 'News'
news.tags = ['news', 'articles']
instance = WebSite()
instance.name = 'Super fun site'
instance.pages = [home, news, preferences]
An instance of this message can be represented as:
name=Super+fun+site&page-0.title=Home&pages-1.title=News&...
pages-1.tags-0=new&pages-1.tags-1=articles
Helper classes:
URLEncodedRequestBuilder: Used for encapsulating the logic used for building
a request message from a URL encoded RPC.
"""
import cgi
import re
import urllib
from . import message_types
from . import messages
from . import util
# Public API of this module.
__all__ = ['CONTENT_TYPE',
           'URLEncodedRequestBuilder',
           'encode_message',
           'decode_message',
           ]
# MIME content type handled by this protocol implementation.
CONTENT_TYPE = 'application/x-www-form-urlencoded'
# Matches one dotted-path component: a field name plus an optional
# dash-separated numeric index (e.g. 'pages-1').
_FIELD_NAME_REGEX = re.compile(r'^([a-zA-Z_][a-zA-Z_0-9]*)(?:-([0-9]+))?$')
class URLEncodedRequestBuilder(object):
  """Helper that encapsulates the logic used for building URL encoded messages.
  This helper is used to map query parameters from a URL encoded RPC to a
  message instance.
  """
  @util.positional(2)
  def __init__(self, message, prefix=''):
    """Constructor.
    Args:
      message: Message instance to build from parameters.
      prefix: Prefix expected at the start of valid parameters.
    """
    self.__parameter_prefix = prefix
    # The empty tuple indicates the root message, which has no path.
    # __messages is a full cache that makes it very easy to look up message
    # instances by their paths.  See make_path for details about what a path
    # is.
    self.__messages = {(): message}
    # This is a cache that stores paths which have been checked for
    # correctness.  Correctness means that an index is present for repeated
    # fields on the path and absent for non-repeated fields.  The cache is
    # also used to check that indexes are added in the right order so that
    # discontiguous ranges of indexes are ignored.
    self.__checked_indexes = set([()])
  def make_path(self, parameter_name):
    """Parse a parameter name and build a full path to a message value.
    The path of a method is a tuple of 2-tuples describing the names and
    indexes within repeated fields from the root message (the message being
    constructed by the builder) to an arbitrarily nested message within it.
    Each 2-tuple node of a path (name, index) is:
      name: The name of the field that refers to the message instance.
      index: The index within a repeated field that refers to the message
        instance, None if not a repeated field.
    For example, consider:
      class VeryInner(messages.Message):
        ...
      class Inner(messages.Message):
        very_inner = messages.MessageField(VeryInner, 1, repeated=True)
      class Outer(messages.Message):
        inner = messages.MessageField(Inner, 1)
    If this builder is building an instance of Outer, that instance is
    referred to in the URL encoded parameters without a path.  Therefore
    its path is ().
    The child 'inner' is referred to by its path (('inner', None)).
    The first child of repeated field 'very_inner' on the Inner instance
    is referred to by (('inner', None), ('very_inner', 0)).
    Examples:
      # Correct reference to model where nation is a Message, district is
      # repeated Message and county is any not repeated field type.
      >>> make_path('nation.district-2.county')
      (('nation', None), ('district', 2), ('county', None))
      # Field is not part of model.
      >>> make_path('nation.made_up_field')
      None
      # nation field is not repeated and index provided.
      >>> make_path('nation-1')
      None
      # district field is repeated and no index provided.
      >>> make_path('nation.district')
      None
    Args:
      parameter_name: Name of query parameter as passed in from the request.
        in order to make a path, this parameter_name must point to a valid
        field within the message structure.  Nodes of the path that refer to
        repeated fields must be indexed with a number, non repeated nodes must
        not have an index.
    Returns:
      Parsed version of the parameter_name as a tuple of tuples:
        attribute: Name of attribute associated with path.
        index: Postitive integer index when it is a repeated field, else None.
      Will return None if the parameter_name does not have the right prefix,
      does not point to a field within the message structure, does not have
      an index if it is a repeated field or has an index but is not a repeated
      field.
    """
    if parameter_name.startswith(self.__parameter_prefix):
      # Strip the builder's prefix before walking the dotted path.
      parameter_name = parameter_name[len(self.__parameter_prefix):]
    else:
      return None
    path = []
    name = []  # NOTE(review): appears unused in this method -- candidate for removal.
    message_type = type(self.__messages[()])  # Get root message.
    for item in parameter_name.split('.'):
      # This will catch sub_message.real_message_field.not_real_field
      if not message_type:
        return None
      item_match = _FIELD_NAME_REGEX.match(item)
      if not item_match:
        return None
      attribute = item_match.group(1)
      index = item_match.group(2)
      if index:
        index = int(index)
      try:
        field = message_type.field_by_name(attribute)
      except KeyError:
        return None
      # Repeated fields require an index; non-repeated fields forbid one.
      if field.repeated != (index is not None):
        return None
      if isinstance(field, messages.MessageField):
        message_type = field.message_type
      else:
        # Leaf (non-message) field: nothing deeper may follow it.
        message_type = None
      # Path is valid so far.  Append node and continue.
      path.append((attribute, index))
    return tuple(path)
def __check_index(self, parent_path, name, index):
    """Validate an index for a field relative to an existing path.

    A missing index (None) or an index of 0 is always acceptable: zero
    means the next insertion will simply append to the repeated field.
    Otherwise the index must refer either to an element that already
    exists in the list or to the position one past its tail.

    Args:
      parent_path: Path of the message that owns the field.
      name: Name of the field being indexed.
      index: Candidate index, or None for non-repeated fields.

    Returns:
      True when the index is usable for insertion/append, else False.
    """
    # None (non-repeated) and 0 both mean "nothing to range-check".
    if not index:
        return True

    owner = self.__messages.get(parent_path, None)
    existing = getattr(owner, name, None)

    # No list yet: only index 0 would have been legal, and we know the
    # index is non-zero at this point.
    if not existing:
        return False

    # Valid when it addresses an existing slot or the append position.
    return index <= len(existing)
def __check_indexes(self, path):
    """Verify every index along a path, caching successful prefixes.

    Walks the path from the root, confirming each node's index points at
    an existing message or at the append position of its repeated field.
    Prefixes that pass are memoized in __checked_indexes so repeated
    parameters sharing a parent are only validated once.

    Args:
      path: Tuple of 2-tuples (name, index); see make_path.

    Returns:
      True if every index on the path is within range, else False.
    """
    if path in self.__checked_indexes:
        return True

    prefix = ()
    for node in path:
        attribute, position = node
        candidate = prefix + (node,)
        # Only validate prefixes we have not already proven good.
        if candidate not in self.__checked_indexes:
            if not self.__check_index(prefix, attribute, position):
                return False
            self.__checked_indexes.add(candidate)
        prefix = candidate
    return True
def __get_or_create_path(self, path):
    """Get a message from the messages cache or create it and add it.

    This method will also create any parent messages based on the path.
    When a new instance of a given message is created, it is stored in
    __messages keyed by its path.

    Args:
      path: Path of message to get.  Path must be valid, in other words
        __check_indexes(path) returns true.  Tuple of 2-tuples
        (name, index).  See make_path for more information.

    Returns:
      Message instance if the field being pointed to by the path is a
      message, else will return None for non-message fields.
    """
    # Fast path: the exact message was already built for an earlier
    # parameter that shared this path.
    message = self.__messages.get(path, None)
    if message:
        return message

    parent_path = ()
    parent = self.__messages[()]  # Get the root object.
    for name, index in path:
        field = parent.field_by_name(name)
        next_path = parent_path + ((name, index),)
        next_message = self.__messages.get(next_path, None)
        if next_message is None:
            # Cache miss: instantiate the sub-message, remember it, and
            # attach it to the parent -- either directly (singular field)
            # or by creating/extending the repeated field's list.
            next_message = field.message_type()
            self.__messages[next_path] = next_message
            if not field.repeated:
                setattr(parent, field.name, next_message)
            else:
                list_value = getattr(parent, field.name, None)
                if list_value is None:
                    setattr(parent, field.name, [next_message])
                else:
                    # Index validity (== len) was guaranteed by
                    # __check_indexes, so appending is correct here.
                    list_value.append(next_message)
        # Descend one level: the resolved message becomes the parent for
        # the next path node.
        parent_path = next_path
        parent = next_message
    return parent
def add_parameter(self, parameter, values):
    """Add a single parameter.

    Adds a single parameter and its value to the request message.

    Args:
      parameter: Query string parameter to map to request.
      values: List of values to assign to request message.

    Returns:
      True if parameter was valid and added to the message, else False.

    Raises:
      DecodeError if the parameter refers to a valid field, and the values
        parameter does not have one and only one value.  Non-valid query
        parameters may have multiple values and should not cause an error.
    """
    path = self.make_path(parameter)

    if not path:
        return False

    # Must check that all indexes of all items in the path are correct before
    # instantiating any of them.  For example, consider:
    #
    #   class Repeated(object):
    #     ...
    #
    #   class Inner(object):
    #
    #     repeated = messages.MessageField(Repeated, 1, repeated=True)
    #
    #   class Outer(object):
    #
    #     inner = messages.MessageField(Inner, 1)
    #
    #   instance = Outer()
    #   builder = URLEncodedRequestBuilder(instance)
    #   builder.add_parameter('inner.repeated')
    #
    #   assert not hasattr(instance, 'inner')
    #
    # The check is done relative to the instance of Outer pass in to the
    # constructor of the builder.  This instance is not referred to at all
    # because all names are assumed to be relative to it.
    #
    # The 'repeated' part of the path is not correct because it is missing an
    # index.  Because it is missing an index, it should not create an instance
    # of Repeated.  In this case add_parameter will return False and have no
    # side effects.
    #
    # A correct path that would cause a new Inner instance to be inserted at
    # instance.inner and a new Repeated instance to be appended to the
    # instance.inner.repeated list would be 'inner.repeated-0'.
    if not self.__check_indexes(path):
        return False

    # Ok to build objects.  Create (or fetch) every ancestor message, then
    # resolve the leaf field the value will be assigned to.
    parent_path = path[:-1]
    parent = self.__get_or_create_path(parent_path)
    name, index = path[-1]
    field = parent.field_by_name(name)

    # A single query parameter maps to exactly one leaf value; repeated
    # values for the same (indexed) name are a client error.
    if len(values) != 1:
        raise messages.DecodeError(
            'Found repeated values for field %s.' % field.name)

    value = values[0]
    # Convert the raw string to the leaf field's native type.
    if isinstance(field, messages.IntegerField):
        converted_value = int(value)
    elif isinstance(field, message_types.DateTimeField):
        try:
            converted_value = util.decode_datetime(value)
        except ValueError, e:  # NOTE: Python 2-only except syntax.
            raise messages.DecodeError(e)
    elif isinstance(field, messages.MessageField):
        # Just make sure it's instantiated.  Assignment to field or
        # appending to list is done in __get_or_create_path.
        self.__get_or_create_path(path)
        return True
    elif isinstance(field, messages.StringField):
        # Python 2: raw bytes from the query string -> unicode.
        converted_value = value.decode('utf-8')
    elif isinstance(field, messages.BooleanField):
        # Old-style conditional expression; equivalent to
        # (value.lower() == 'true').
        converted_value = value.lower() == 'true' and True or False
    else:
        # Fall through to enums (and any other typed field).
        try:
            converted_value = field.type(value)
        except TypeError:
            raise messages.DecodeError('Invalid enum value "%s"' % value)

    if field.repeated:
        value_list = getattr(parent, field.name, None)
        if value_list is None:
            setattr(parent, field.name, [converted_value])
        else:
            if index == len(value_list):
                value_list.append(converted_value)
            else:
                # Index should never be above len(value_list) because it was
                # verified during the index check above.
                value_list[index] = converted_value
    else:
        setattr(parent, field.name, converted_value)

    return True
@util.positional(1)
def encode_message(message, prefix=''):
    """Encode Message instance to url-encoded string.

    Args:
      message: Message instance to encode in to url-encoded string.
      prefix: Prefix to append to field names of contained values.

    Returns:
      String encoding of Message in URL encoded format.

    Raises:
      messages.ValidationError if message is not initialized.
    """
    message.check_initialized()
    # Accumulated (name, value) pairs; shared with the nested helper below.
    parameters = []

    def build_message(parent, prefix):
        """Recursively build parameter list for URL response.

        Args:
          parent: Message to build parameters for.
          prefix: Prefix to append to field names of contained values.

        Returns:
          True if some value of parent was added to the parameters list,
          else False, meaning the object contained no values.
        """
        has_any_values = False
        # Iterate in field-number order so the output is deterministic.
        for field in sorted(parent.all_fields(), key=lambda f: f.number):
            next_value = parent.get_assigned_value(field.name)
            if next_value is None:
                continue

            # Found a value.  Ultimate return value should be True.
            has_any_values = True

            # Normalize all values in to a list.
            if not field.repeated:
                next_value = [next_value]

            for index, item in enumerate(next_value):
                # Create a name with an index if it is a repeated field.
                if field.repeated:
                    field_name = '%s%s-%s' % (prefix, field.name, index)
                else:
                    field_name = prefix + field.name

                if isinstance(field, message_types.DateTimeField):
                    # DateTimeField stores its data as a RFC 3339 compliant
                    # string.
                    parameters.append((field_name, item.isoformat()))
                elif isinstance(field, messages.MessageField):
                    # Message fields must be recursed in to in order to
                    # construct their component parameter values.
                    if not build_message(item, field_name + '.'):
                        # The nested message is empty.  Append an empty value
                        # to represent it.
                        parameters.append((field_name, ''))
                elif isinstance(field, messages.BooleanField):
                    parameters.append((field_name, item and 'true' or 'false'))
                else:
                    # Python 2: encode unicode to UTF-8 bytes before str().
                    if isinstance(item, unicode):
                        item = item.encode('utf-8')
                    parameters.append((field_name, str(item)))

        return has_any_values

    build_message(message, prefix)

    # Also add any unrecognized values from the decoded string.
    for key in message.all_unrecognized_fields():
        values, _ = message.get_unrecognized_field_info(key)
        if not isinstance(values, (list, tuple)):
            values = (values,)
        for value in values:
            parameters.append((key, value))

    return urllib.urlencode(parameters)
def decode_message(message_type, encoded_message, **kwargs):
    """Decode urlencoded content to message.

    Args:
      message_type: Message class to instantiate and merge URL encoded
        content into.
      encoded_message: URL encoded message (query-string formatted).
      **kwargs: Forwarded to URLEncodedRequestBuilder (e.g. prefix).

    Returns:
      Decoded instance of message_type.
    """
    message = message_type()
    builder = URLEncodedRequestBuilder(message, **kwargs)
    # keep_blank_values=True so the empty-string markers that represent
    # empty nested messages survive parsing.
    arguments = cgi.parse_qs(encoded_message, keep_blank_values=True)
    # Sorted iteration keeps processing order deterministic and makes
    # parent paths sort before their children.
    for argument, values in sorted(arguments.iteritems()):
        added = builder.add_parameter(argument, values)
        # Save off any unknown values, so they're still accessible.
        if not added:
            message.set_unrecognized_field(argument, values, messages.Variant.STRING)
    message.check_initialized()
    return message
| apache-2.0 |
khkaminska/djangoproject.com | aggregator/feeds.py | 11 | 1695 | from django.contrib.syndication.views import Feed
from django.shortcuts import get_object_or_404
from django_hosts.resolvers import reverse
from .models import FeedItem, FeedType
class BaseCommunityAggregatorFeed(Feed):
    """Shared item-level behaviour for the community aggregator feeds.

    Maps FeedItem attributes onto the syndication framework's item_* hooks.
    Subclasses supply the queryset (items) and the feed-level metadata
    (title, link, description).
    """

    def item_title(self, item):
        return item.title

    def item_description(self, item):
        return item.summary

    def item_guid(self, item):
        return item.guid

    def item_link(self, item):
        return item.link

    def item_author_name(self, item):
        # The source feed's title stands in for the author name.
        return item.feed.title

    def item_author_link(self, item):
        return item.feed.public_url

    def item_pubdate(self, item):
        return item.date_modified
class CommunityAggregatorFeed(BaseCommunityAggregatorFeed):
    """Feed of the 25 most recent items for a single feed type."""

    def get_object(self, request, slug=None):
        # The feed type addressed by the URL slug; 404 when unknown.
        return get_object_or_404(FeedType, slug=slug)

    def items(self, obj):
        # Newest first, with the related feed rows prefetched to avoid
        # per-item queries while rendering.
        recent = (
            FeedItem.objects
            .filter(feed__feed_type=obj)
            .order_by('-date_modified')
            .select_related('feed', 'feed__feed_type')
        )
        return recent[:25]

    def title(self, obj):
        return "Django community aggregator: %s" % obj.name

    def link(self, obj):
        return reverse('aggregator-feed', args=[obj.slug], host='www')

    def description(self, obj):
        # Reuse the title text as the feed description.
        return self.title(obj)
class CommunityAggregatorFirehoseFeed(BaseCommunityAggregatorFeed):
    """Single feed combining the 50 most recent items across all feeds."""

    title = 'Django community aggregator firehose'
    description = 'All activity from the Django community aggregator'

    def link(self):
        return reverse('aggregator-firehose-feed', host='www')

    def items(self):
        # Newest first across every source feed; prefetch the feed rows.
        ordered = FeedItem.objects.order_by('-date_modified')
        return ordered.select_related('feed')[:50]
SciGaP/DEPRECATED-Cipres-Airavata-POC | saminda/cipres-airavata/sdk/scripts/remote_resource/trestles/test_lib.py | 3 | 12004 | import os
import string
import math
import re
import subprocess
# I didn't implement getProperties, found it somewhere, just reads a java style
# properties file into a dictionary.
#Note: Fixed by Bryan Lunt <blunt@sdsc.edu> on Oct, 28, 2013 to handle properly escaped spaces in properties files.
import re
# Separator for java-style properties: ':', '=', or a space that is NOT
# escaped with a backslash (enforced by the negative look-behind).
__property_separator_regex = """:|=|(?<=[^\\\\]) """
__property_regex = re.compile(__property_separator_regex)


def getProperties(filename):
    """Read a java-style .properties file into a dict.

    Blank lines and lines beginning with '!' or '#' are skipped.  The key is
    everything before the first unescaped separator (':', '=' or space); any
    escaped spaces ('\\ ') in the key are unescaped.  The value is the rest
    of the line with leading ':', '=' and spaces stripped.  A line with no
    separator maps the whole line to an empty value.

    Args:
      filename: Path of the properties file to read.

    Returns:
      Dict mapping property names to their string values.
    """
    propDict = dict()
    # 'with' + open() closes the handle even if parsing raises; the
    # original used the Python 2-only file() builtin with an explicit
    # close() that leaked the handle on error.
    with open(filename, "r") as propFile:
        for propLine in propFile:
            propDef = propLine.strip()
            if len(propDef) == 0:
                continue
            if propDef[0] in ('!', '#'):
                continue
            separator_location = __property_regex.search(propDef)
            if separator_location is not None:
                found = separator_location.start()
            else:
                # No separator: whole line is the key, value is empty.
                found = len(propDef)
            name = propDef[:found].rstrip().replace('\\ ', ' ')
            value = propDef[found:].lstrip(":= ").rstrip()
            propDict[name] = value
    return propDict
def getToolType(commandlineString):
    """Guess which phylogenetics tool a command line invokes.

    Performs a case-insensitive substring search over the concatenated
    command line, checking tools in a fixed priority order.

    Args:
      commandlineString: Iterable of command-line fragments (or a string).

    Returns:
      One of 'garli', 'raxml', 'mrbayes', 'beast', or None if no known
      tool name appears.
    """
    # Join and lowercase once instead of re-scanning per tool.
    joined = "".join(commandlineString).lower()
    if "garli" in joined:
        return "garli"
    if "raxml" in joined:
        return "raxml"
    if "mbhwrapper" in joined:
        return "mrbayes"
    if "beast" in joined:
        return "beast"
    return None
# There's only one queue. Max runtime should be 2 weeks hrs (which is 336 hrs or 20160 minutes) for user cipres. Not
# sure what it is for other users. CHANGE: also using "shared" queue now.
shared_queue = "shared"
shared_queue_limit = 20160.0  # minutes (2 weeks)
short_queue = "normal"
# (name, limit-in-minutes) tuples, ordered shortest limit first.
queues = (("normal", 20160.0), )
cores_per_node = 32
# Effectively get rid of max_nodes by setting it to 5000
max_nodes = 5000
max_cores = max_nodes * cores_per_node
default_cores = cores_per_node
# Allocation/account the jobs are billed against.
account = "TG-DEB090011"
# account = "ddp116"
scheduler_file = "scheduler.conf"
email = "terri@sdsc.edu,mmiller@sdsc.edu"
# NOTE(review): this assignment is dead -- jobname is unconditionally
# reassigned from the environment a few lines below.
jobname = ""
# Filenames used by submitJob/submitDirectJob, relative to the job dir.
runfile = "./batch_command.run"
statusfile = "./batch_command.status"
cmdfile = "./batch_command.cmdline"
jobdir = os.getcwd()
# $PBS_JOBID is expanded by the batch system, not by this script.
local_jobdir = "/scratch/cipres/$PBS_JOBID"
jobname = os.environ.get("WB_JOBID", "cipres")
def schedulerInfo(properties, tooltype):
    """Decide queue/node/core parameters for a job on this resource.

    Args:
      properties: Dictionary that may contain keys jobtype, mpi_processes,
        threads_per_process, nodes, runhours, node_exclusive.
      tooltype: Tool family string ('garli', 'raxml', 'mrbayes', 'beast')
        or None; triggers tool-specific queue special cases.

    Returns:
      Dictionary with keys: is_direct, is_mpi (absent when is_direct is
      True), queue, runtime (minutes), mpi_processes, nodes, ppn,
      threads_per_process, node_exclusive.
    """
    # get runhours from properties and convert it to minutes, default to zero if not specified.
    # NOTE(review): bare except silently maps any bad value to 0.0.
    try:
        runtime = properties.get("runhours", 0.0)
        runtime = math.ceil(float(runtime) * 60 )
    except:
        runtime = 0.0
    # Index constants into the (name, limit) queue tuples.
    qname = 0
    qlimit = 1
    # if runtime is 0 (which isn't really valid), change it to limit for the shortest queue
    # so we have something reasonable to work with.
    if runtime == 0.0:
        runtime = queues[qname][qlimit]
    # based on runtime, figure out which queue we should be using.
    queue = None
    for entry in queues:
        if runtime <= entry[qlimit]:
            queue = entry[qname]
            break
    if queue == None:
        # Runtime exceeds every queue limit: clamp to the last queue.
        queue = queues[-1][qname]
        runtime = queues[-1][qlimit]
    # Create retval and set values we just determined for runtime and queue. Set defaults for some
    # of the other retvals which may be overriden below. Note that for serial jobs we'll need to set nodes=1
    # and ppn=1 in the job run script.
    retval = {"runtime":runtime, "queue":queue, "threads_per_process":int(properties.get("threads_per_process", 1)),
              "nodes": int(properties.get("nodes", 1)), "ppn": int(1),
              "mpi_processes":int(properties.get("mpi_processes",1)),
              "node_exclusive":int(properties.get("node_exclusive",0))
              }
    if properties.get("jobtype") == "direct":
        retval["is_direct"] = True
        return retval
    else:
        retval["is_direct"] = False
    if properties.get("jobtype", "") == "mpi":
        retval["is_mpi"] = True
    else:
        retval["is_mpi"] = False
    if (retval["is_mpi"] == True):
        # Some of our pise xml interfaces just specify the number of mpi processes they want.
        # We round it down to a multiple of the number of cores per node and request enough nodes
        # so that each mpi process has its own core.
        #
        # Not sure if we still have any interfaces like I just described but it's definitely not
        # how we want to run garli here, so explicitly exclude it. Garli just specifies
        # the number of mpi processes but we always want to use a single node for it.
        # NOTE(review): "thread_per_process" below looks like a typo for
        # "threads_per_process" (used everywhere else) -- confirm.
        if (properties.get("nodes", "") == "") and (properties.get("thread_per_process", "") == "") and tooltype != "garli":
            processes = int(properties.get("mpi_processes", 1))
            processes = int(processes / cores_per_node) * cores_per_node
            processes = min(max(processes, default_cores), max_cores)
            # NOTE(review): plain '/' -- floor division on Python 2 only.
            retval["nodes"] = processes / cores_per_node
            retval["mpi_processes"] = processes
            retval["ppn"] = int(retval["mpi_processes"]) / int(retval["nodes"]);
        # Pise interfaces that have more knowledge of the specific machine explicitly specify
        # the number of nodes as well as the number of mpi processes; we don't 2nd guess them.
        else:
            retval["nodes"] = int(properties.get("nodes", 1));
            retval["mpi_processes"] = int(properties.get("mpi_processes", 1));
            retval["ppn"] = int(retval["mpi_processes"]) / int(retval["nodes"]);
        # Special case for garli. Run small jobs in shared queue.
        if (tooltype == "garli") and (retval["mpi_processes"] < cores_per_node):
            retval["queue"] = shared_queue
            if runtime > shared_queue_limit:
                runtime = shared_queue_limit
                retval["runtime"] = runtime
    else:
        # Special case for small, non-mpi raxml jobs, run in the shared queue. Also for beast
        if (retval["nodes"] == 1) and ((retval["threads_per_process"] == 8) or (tooltype == "beast")):
            queue = shared_queue
            retval["queue"] = queue
            retval["ppn"] = retval["threads_per_process"]
            if runtime > shared_queue_limit:
                runtime = shared_queue_limit
                retval["runtime"] = runtime
    if(retval["node_exclusive"] == 1):
        # Exclusive jobs claim every core on the node regardless of threads.
        retval["ppn"] = cores_per_node
    return retval
def log(filename, message):
    """Append message to the named file.

    Args:
      filename: Path of the log file (created if missing).
      message: Text to append verbatim (no newline is added).
    """
    # Context manager guarantees the handle is closed even when write()
    # raises; the original leaked the handle on error.
    with open(filename, "a") as f:
        f.write(message)
def deleteJob(jobid, workingdir):
    """Cancel a batch job, preferring a job-specific cancel script.

    If workingdir contains an executable named 'cancelJobs' it is run from
    that directory; otherwise plain 'qdel <jobid>' is used.

    Args:
      jobid: Numeric scheduler job id.
      workingdir: Job's working directory, possibly holding 'cancelJobs'.

    Raises:
      SystemError: if the cancel command exits non-zero.
    """
    if os.path.isfile(workingdir + "/cancelJobs"):
        # NOTE(review): this chdir changes the cwd of the whole process
        # and is never restored.
        os.chdir(workingdir)
        cmd = "./cancelJobs %d" % jobid
    else:
        cmd = "qdel %d" % jobid
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    outerr = p.communicate()
    output = outerr[0]
    err = outerr[1]
    if (p.returncode != 0):
        raise SystemError("Error running '%s', return code is %d. stdout is '%s', stderr is '%s'" % (cmd,
                          p.returncode, output, err))
def jobInQueue():
    """List this user's non-completed job ids currently in the queue.

    Runs 'qstat', filters the output through 'grep `whoami`', and collects
    the numeric job ids of rows whose state column is not 'C' (completed).

    Returns:
      List of job id strings (the portion before the first '.').

    Raises:
      SystemError: if qstat or the grep pipe fails or output looks wrong.
    """
    cmd = "qstat"
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    outerr = p.communicate()
    output = outerr[0]
    err = outerr[1]
    if (p.returncode != 0):
        raise SystemError("Error running qstat, return code is %d. stderr is %s" % (p.returncode, err))
    if (len(err) != 0):
        raise SystemError("Error running qstat, stderr is %s" % (err))
    # A valid qstat listing always has at least a header; anything shorter
    # than 5 bytes cannot be real output.
    if (len(output) < 5):
        raise SystemError("Error running qstat, output looks wrong: %s" % (output))
    # cmd = 'echo "%s" | grep `whoami`' % output
    # Feed the qstat output to grep via stdin instead of a shell echo so
    # the output doesn't have to be quoted.
    cmd = 'grep `whoami`'
    p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    outerr = p.communicate(output)
    output = outerr[0]
    err = outerr[1]
    if (len(err) != 0):
        raise SystemError("Error piping qstat thru grep: %s" % (err))
    output_rows = output.split("\n")
    jobs = []
    for row in output_rows:
        r = row.split()
        # Column 5 (index 4) is the job state; skip completed ('C') jobs.
        if len(r) > 4 and r[4] != "C":
            # Trim '1234.host...' down to the bare numeric id.
            r[0] = r[0].split(".", 1)[0]
            jobs.append(r[0])
    return jobs
# To do: modify RAxML-Light.sh to accept --url argument and pass it here, like --account. Decide whether
# to use --email too, maybe just on the last job? Or ask Mark if he wants all the emails?
def submitDirectJob(account, url, email, jobname, commandline):
    """Write a wrapper script for a 'direct' job and execute it.

    Writes cmdfile containing the command line plus --account/--url/--email
    arguments, makes it executable, runs it, and extracts the job id from
    its output.  On success the id is logged to statusfile and
    _JOBINFO.TXT and 'jobid=<n>' is printed for the caller to parse.

    Args:
      account: Allocation account string.
      url: Callback URL passed to the script.
      email: Notification address passed to the script.
      jobname: Unused here (kept for interface parity with submitJob).
      commandline: Sequence of command fragments to join into the script.

    Returns:
      0 on success, non-zero on failure (2 is remapped to 1 because the
      caller interprets 2 as "too many jobs queued").
    """
    # Not exactly a general purpose solution but for raxml-light we can just add account, email and url
    # arguments to the command line.
    rfile = open(cmdfile, "w")
    rfile.write("#!/bin/sh\n")
    rfile.write(" ".join(commandline))
    rfile.write(" --account %s" % account)
    rfile.write(" --url %s" % url)
    rfile.write(" --email %s" % email)
    rfile.write("\n")
    rfile.close()
    # 0744: owner rwx, group/other read (Python 2 octal literal).
    os.chmod(cmdfile, 0744);
    cmd = cmdfile
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    output = p.communicate()[0]
    retval = p.returncode
    if retval != 0:
        print "Error submitting job:\n"
        print output
        log(statusfile, "submitDirectJob is returning %d.\nStdout/stderr is:%s\n" % (retval, output))
        # When there's a bash syntax error in a script it exits with 2, but if we return 2, we've
        # defined that to mean "too many jobs queued" and cipres will print a special message.
        if (retval == 2):
            retval = 1
        return retval
    log(statusfile, "Job submission stdout/stderr is: %s\n" % output)
    # output should be just the full job id, <id>.trestles-fe1.sdsc.edu:
    # NOTE(review): firstline is computed but never used afterwards.
    firstline = output.splitlines()
    if len(firstline) == 1:
        firstline = firstline[0]
    p = re.compile(r"^(\d+).trestles.\S+", re.M)
    m = p.search(output)
    if m != None:
        jobid = m.group(0)        # full '<id>.trestles...' string
        short_jobid = m.group(1)  # numeric portion only
        print "jobid=%d" % int(short_jobid)
        log(statusfile, "JOBID is %s\n" % jobid)
        log("./_JOBINFO.TXT", "\nJOBID=%s\n" % jobid)
        return 0
    print "Error, job submission says: %s" % output
    log(statusfile, "can't find jobid, submitDirectJob is returning 1\n")
    return 1
# Returns 0 on success, 2 means too many jobs queued.
def submitJob():
    """Submit the prepared batch script (runfile) with qsub.

    Appends qsub's stderr to statusfile, parses the job id from qsub's
    stdout, logs it, and prints 'jobid=<n>' for the caller to parse.

    Returns:
      0 on success, 2 when too many jobs are queued, 1 on other errors.
    """
    cmd = "qsub %s 2>> %s" % (runfile, statusfile)
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    output = p.communicate()[0]
    retval = p.returncode
    if retval != 0:
        # read whatever qsub wrote to the statusfile and print it to stdout
        print "Error submitting job:\n"
        f = open(statusfile, "r"); print f.read(), "\n\n"; f.close()
        print output
        # When we return 2 it means too many jobs are queued. qstat returns -226 on abe
        # in this situation ... not sure if that's true here, on trestles as well.
        if retval == -226:
            retval = 2
        log(statusfile, "submit_job is returning %d\n" % retval)
        return retval
    log(statusfile, "qsub output is: " + output + "\n" +
        "======================================================================" + "\n")
    # output from qsub should on trestles is just the full job id, <id>.trestles-fe1.sdsc.edu:
    p = re.compile(r"^(\d+).trestles.\S+", re.M)
    m = p.search(output)
    if m != None:
        jobid = m.group(0)        # full '<id>.trestles...' string
        short_jobid = m.group(1)  # numeric portion only
        print "jobid=%d" % int(short_jobid)
        log(statusfile, "JOBID is %s\n" % jobid)
        log("./_JOBINFO.TXT", "\nJOBID=%s\n" % jobid)
        return 0
    else:
        print "Error, qsub says: %s" % output
        log(statusfile, "can't get jobid, submit_job is returning 1\n")
        return 1
| apache-2.0 |
TemplateVoid/mapnik | scons/scons-local-2.3.1/SCons/Tool/gcc.py | 8 | 2901 | """SCons.Tool.gcc
Tool-specific initialization for gcc.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/gcc.py 2014/03/02 14:18:15 garyo"
import cc
import os
import re
import subprocess
import SCons.Util
compilers = ['gcc', 'cc']
def generate(env):
    """Add Builders and construction variables for gcc to an Environment.

    Detects the first available compiler from `compilers`, sets shared-
    object compile flags per platform, and records the detected compiler
    version in env['CCVERSION'].
    """
    cc.generate(env)

    env['CC'] = env.Detect(compilers) or 'gcc'
    # Windows-ish platforms must not use -fPIC; everywhere else shared
    # objects need position-independent code.
    if env['PLATFORM'] in ['cygwin', 'win32']:
        env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS')
    else:
        env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS -fPIC')
    # determine compiler version
    if env['CC']:
        #pipe = SCons.Action._subproc(env, [env['CC'], '-dumpversion'],
        pipe = SCons.Action._subproc(env, [env['CC'], '--version'],
                                     stdin = 'devnull',
                                     stderr = 'devnull',
                                     stdout = subprocess.PIPE)
        # If the compiler can't report a version, leave CCVERSION unset.
        if pipe.wait() != 0: return
        # -dumpversion was added in GCC 3.0.  As long as we're supporting
        # GCC versions older than that, we should use --version and a
        # regular expression.
        #line = pipe.stdout.read().strip()
        #if line:
        #    env['CCVERSION'] = line
        line = pipe.stdout.readline()
        # Pull a dotted version number (e.g. 4.8.2) out of the banner line.
        match = re.search(r'[0-9]+(\.[0-9]+)+', line)
        if match:
            env['CCVERSION'] = match.group(0)
def exists(env):
    """Return a truthy path when one of the known compilers is available."""
    return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| lgpl-2.1 |
darktears/chromium-crosswalk | testing/legion/lib/process.py | 25 | 8658 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""RPC compatible subprocess-type module.
This module defined both a task-side process class as well as a controller-side
process wrapper for easier access and usage of the task-side process.
"""
import logging
import os
import subprocess
import sys
import threading
import time
from legion.lib import common_lib
from utils import subprocess42
class TimeoutError(Exception):
    """Raised by Process.Wait when the timeout expires.

    NOTE(review): on Python 3 this name shadows the builtin TimeoutError
    for code importing it from this module.
    """
    pass
class ControllerProcessWrapper(object):
    """Controller-side process wrapper class.

    This class provides a more intuitive interface to task-side processes
    than calling the methods directly using the RPC object.
    """

    def __init__(self, rpc, cmd, verbose=False, detached=False, cwd=None,
                 key=None, shell=None):
        """Create and start a task-side process over RPC.

        Args:
          rpc: RPC proxy exposing the task-side `subprocess` interface.
          cmd: Command (list or string) to run on the task side.
          verbose: Mirror the child's output on the task's stdout/stderr.
          detached: Run the child detached from the task process.
          cwd: Working directory for the child on the task side.
          key: Optional explicit key; auto-generated when None.
          shell: Run the command through the shell on the task side.
        """
        logging.debug('Creating a process with cmd=%s', cmd)
        self._rpc = rpc
        self._key = rpc.subprocess.Process(cmd, key)
        logging.debug('Process created with key=%s', self._key)
        # All Set* configuration must happen before Start (the RPC layer
        # does not support keyword arguments at creation time).
        if verbose:
            self._rpc.subprocess.SetVerbose(self._key)
        if detached:
            self._rpc.subprocess.SetDetached(self._key)
        if cwd:
            self._rpc.subprocess.SetCwd(self._key, cwd)
        if shell:
            self._rpc.subprocess.SetShell(self._key)
        self._rpc.subprocess.Start(self._key)

    @property
    def key(self):
        # Opaque task-side identifier for this process.
        return self._key

    def Terminate(self):
        """Ask the task side to terminate the process (graceful)."""
        logging.debug('Terminating process %s', self._key)
        return self._rpc.subprocess.Terminate(self._key)

    def Kill(self):
        """Ask the task side to kill the process (forceful)."""
        logging.debug('Killing process %s', self._key)
        self._rpc.subprocess.Kill(self._key)

    def Delete(self):
        """Remove the process entry from the task-side registry."""
        return self._rpc.subprocess.Delete(self._key)

    def GetReturncode(self):
        """Return the child's exit code, or None while still running."""
        return self._rpc.subprocess.GetReturncode(self._key)

    def ReadStdout(self):
        """Returns all stdout since the last call to ReadStdout.

        This call allows the user to read stdout while the process is running.
        However each call will flush the local stdout buffer. In order to make
        multiple calls to ReadStdout and to retain the entire output the results
        of this call will need to be buffered in the calling code.
        """
        return self._rpc.subprocess.ReadStdout(self._key)

    def ReadStderr(self):
        """Returns all stderr read since the last call to ReadStderr.

        See ReadStdout for additional details.
        """
        return self._rpc.subprocess.ReadStderr(self._key)

    def ReadOutput(self):
        """Returns the (stdout, stderr) since the last Read* call.

        See ReadStdout for additional details.
        """
        return self._rpc.subprocess.ReadOutput(self._key)

    def Wait(self, timeout=None):
        """Block until the process completes, or the timeout elapses."""
        return self._rpc.subprocess.Wait(self._key, timeout)

    def Poll(self):
        """Non-blocking exit-code check (None while still running)."""
        return self._rpc.subprocess.Poll(self._key)

    def GetPid(self):
        """Return the task-side OS pid of the child process."""
        return self._rpc.subprocess.GetPid(self._key)
class Process(object):
    """Implements a task-side non-blocking subprocess.

    This non-blocking subprocess allows the caller to continue operating while
    also able to interact with this subprocess based on a key returned to
    the caller at the time of creation.

    Creation args are set via Set* methods called after calling Process but
    before calling Start. This is due to a limitation of the XML-RPC
    implementation not supporting keyword arguments.
    """

    # Class-level registry of live Process objects, keyed by string key.
    _processes = {}
    # Monotonic counter used to auto-generate keys.
    _process_next_id = 0
    # Guards key generation and registry insertion.
    _creation_lock = threading.Lock()

    def __init__(self, cmd, key):
        # Buffers of output not yet consumed by ReadStdout/ReadStderr.
        self.stdout = ''
        self.stderr = ''
        self.key = key
        self.cmd = cmd
        self.proc = None
        self.cwd = None
        self.shell = False
        self.verbose = False
        self.detached = False
        # Set True by the reader thread once the child's pipes are drained.
        self.complete = False
        # Guards stdout/stderr buffers shared with the reader thread.
        self.data_lock = threading.Lock()
        # Full copies of the output are also persisted to disk.
        self.stdout_file = open(self._CreateOutputFilename('stdout'), 'wb+')
        self.stderr_file = open(self._CreateOutputFilename('stderr'), 'wb+')

    def _CreateOutputFilename(self, fname):
        # e.g. <output_dir>/<key>.stdout
        return os.path.join(common_lib.GetOutputDir(), '%s.%s' % (self.key, fname))

    def __str__(self):
        return '%r, cwd=%r, verbose=%r, detached=%r' % (
            self.cmd, self.cwd, self.verbose, self.detached)

    def _reader(self):
        """Background thread: drain the child's pipes into the buffers/files."""
        for pipe, data in self.proc.yield_any():
            with self.data_lock:
                if pipe == 'stdout':
                    self.stdout += data
                    self.stdout_file.write(data)
                    self.stdout_file.flush()
                    if self.verbose:
                        sys.stdout.write(data)
                else:
                    self.stderr += data
                    self.stderr_file.write(data)
                    self.stderr_file.flush()
                    if self.verbose:
                        sys.stderr.write(data)
        # yield_any() returning means the child exited and pipes are drained.
        self.complete = True

    @classmethod
    def KillAll(cls):
        """Kill every registered process."""
        for key in cls._processes:
            cls.Kill(key)

    @classmethod
    def Process(cls, cmd, key=None):
        """Register a new process (not yet started) and return its key."""
        with cls._creation_lock:
            if not key:
                key = 'Process%d' % cls._process_next_id
                cls._process_next_id += 1
            if key in cls._processes:
                raise KeyError('Key %s already in use' % key)
            logging.debug('Creating process %s with cmd %r', key, cmd)
            cls._processes[key] = cls(cmd, key)
        return key

    def _Start(self):
        logging.info('Starting process %s', self)
        self.proc = subprocess42.Popen(self.cmd, stdout=subprocess42.PIPE,
                                       stderr=subprocess42.PIPE,
                                       detached=self.detached, cwd=self.cwd,
                                       shell=self.shell)
        # Reader thread owns the pipes from here on.
        threading.Thread(target=self._reader).start()

    @classmethod
    def Start(cls, key):
        """Launch the previously registered process."""
        cls._processes[key]._Start()

    @classmethod
    def SetCwd(cls, key, cwd):
        """Sets the process's cwd."""
        logging.debug('Setting %s cwd to %s', key, cwd)
        cls._processes[key].cwd = cwd

    @classmethod
    def SetShell(cls, key):
        """Sets the process's shell arg to True."""
        logging.debug('Setting %s.shell = True', key)
        cls._processes[key].shell = True

    @classmethod
    def SetDetached(cls, key):
        """Creates a detached process."""
        logging.debug('Setting %s.detached = True', key)
        cls._processes[key].detached = True

    @classmethod
    def SetVerbose(cls, key):
        """Sets the stdout and stderr to be emitted locally."""
        logging.debug('Setting %s.verbose = True', key)
        cls._processes[key].verbose = True

    @classmethod
    def Terminate(cls, key):
        """Gracefully terminate the process."""
        logging.debug('Terminating process %s', key)
        cls._processes[key].proc.terminate()

    @classmethod
    def Kill(cls, key):
        """Forcefully kill the process."""
        logging.debug('Killing process %s', key)
        cls._processes[key].proc.kill()

    @classmethod
    def Delete(cls, key):
        """Remove the process from the registry, killing it if still alive."""
        if cls.GetReturncode(key) is None:
            logging.warning('Killing %s before deleting it', key)
            cls.Kill(key)
        logging.debug('Deleting process %s', key)
        cls._processes.pop(key)

    @classmethod
    def GetReturncode(cls, key):
        """Exit code of the child, or None while running."""
        return cls._processes[key].proc.returncode

    @classmethod
    def ReadStdout(cls, key):
        """Returns all stdout since the last call to ReadStdout.

        This call allows the user to read stdout while the process is running.
        However each call will flush the local stdout buffer. In order to make
        multiple calls to ReadStdout and to retain the entire output the results
        of this call will need to be buffered in the calling code.
        """
        proc = cls._processes[key]
        with proc.data_lock:
            # Perform a "read" on the stdout data
            stdout = proc.stdout
            proc.stdout = ''
        return stdout

    @classmethod
    def ReadStderr(cls, key):
        """Returns all stderr read since the last call to ReadStderr.

        See ReadStdout for additional details.
        """
        proc = cls._processes[key]
        with proc.data_lock:
            # Perform a "read" on the stderr data
            stderr = proc.stderr
            proc.stderr = ''
        return stderr

    @classmethod
    def ReadOutput(cls, key):
        """Returns the (stdout, stderr) since the last Read* call.

        See ReadStdout for additional details.
        """
        return cls.ReadStdout(key), cls.ReadStderr(key)

    @classmethod
    def Wait(cls, key, timeout=None):
        """Wait for the process to complete.

        We wait for all of the output to be written before returning. This solves
        a race condition found on Windows where the output can lag behind the
        wait call.

        Raises:
          TimeoutError if the process doesn't finish in the specified timeout.
        """
        end = None if timeout is None else timeout + time.time()
        # Poll the reader thread's completion flag rather than the child's
        # exit code so buffered output is fully captured before returning.
        while end is None or end > time.time():
            if cls._processes[key].complete:
                return
            time.sleep(0.05)
        raise TimeoutError()

    @classmethod
    def Poll(cls, key):
        """Non-blocking exit-code check (None while still running)."""
        return cls._processes[key].proc.poll()

    @classmethod
    def GetPid(cls, key):
        """Return the OS pid of the child process."""
        return cls._processes[key].proc.pid
| bsd-3-clause |
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Generic Internet address helper functions."""
import socket
import dns.ipv4
import dns.ipv6
# We assume that AF_INET is always defined.
AF_INET = socket.AF_INET
# AF_INET6 might not be defined in the socket module, but we need it.
# We'll try to use the socket module's value, and if it doesn't work,
# we'll use our own value.
try:
    AF_INET6 = socket.AF_INET6
except AttributeError:
    # 9999 is an arbitrary sentinel that will never equal a real AF_* value.
    AF_INET6 = 9999
def inet_pton(family, text):
    """Convert the textual form of a network address into its binary form.
    @param family: the address family
    @type family: int
    @param text: the textual address
    @type text: string
    @raises NotImplementedError: the address family specified is not
    implemented.
    @rtype: string
    """
    # Guard-clause style dispatch over the two supported families.
    if family == AF_INET:
        return dns.ipv4.inet_aton(text)
    if family == AF_INET6:
        return dns.ipv6.inet_aton(text)
    raise NotImplementedError
def inet_ntop(family, address):
    """Convert the binary form of a network address into its textual form.
    @param family: the address family
    @type family: int
    @param address: the binary address
    @type address: string
    @raises NotImplementedError: the address family specified is not
    implemented.
    @rtype: string
    """
    # Guard-clause style dispatch over the two supported families.
    if family == AF_INET:
        return dns.ipv4.inet_ntoa(address)
    if family == AF_INET6:
        return dns.ipv6.inet_ntoa(address)
    raise NotImplementedError
def af_for_address(text):
    """Determine the address family of a textual-form network address.
    @param text: the textual address
    @type text: string
    @raises ValueError: the address family cannot be determined from the input.
    @rtype: int
    """
    # The bare "except:" clauses previously used here also swallowed
    # KeyboardInterrupt/SystemExit; "except Exception" only catches parse
    # failures.  The unused "junk" bindings are gone as well.
    try:
        dns.ipv4.inet_aton(text)
        return AF_INET
    except Exception:
        pass
    try:
        dns.ipv6.inet_aton(text)
        return AF_INET6
    except Exception:
        raise ValueError
def is_multicast(text):
    """Is the textual-form network address a multicast address?
    @param text: the textual address
    @raises ValueError: the address family cannot be determined from the input.
    @rtype: bool
    """
    # As in af_for_address, the former bare "except:" clauses also caught
    # KeyboardInterrupt/SystemExit; narrow them to Exception.
    try:
        # IPv4 multicast is 224.0.0.0/4, i.e. first octet in [224, 239].
        first = ord(dns.ipv4.inet_aton(text)[0])
        return (first >= 224 and first <= 239)
    except Exception:
        try:
            # IPv6 multicast is ff00::/8, i.e. a first octet of 0xff.
            first = ord(dns.ipv6.inet_aton(text)[0])
            return (first == 255)
        except Exception:
            raise ValueError
| apache-2.0 |
bodyDefault = '''
importScripts('worker-testharness.js');
importScripts('test-helpers.sub.js');
importScripts('/common/get-host-info.sub.js');
var host_info = get_host_info();
test(function() {
var import_script_failed = false;
try {
importScripts(host_info.HTTPS_REMOTE_ORIGIN +
base_path() + 'empty.js');
} catch(e) {
import_script_failed = true;
}
assert_true(import_script_failed,
'Importing the other origins script should fail.');
}, 'importScripts test for default-src');
test(function() {
assert_throws(EvalError(),
function() { eval('1 + 1'); },
'eval() should throw EvalError.')
assert_throws(EvalError(),
function() { new Function('1 + 1'); },
'new Function() should throw EvalError.')
}, 'eval test for default-src');
async_test(function(t) {
fetch(host_info.HTTPS_REMOTE_ORIGIN +
base_path() + 'fetch-access-control.py?ACAOrigin=*',
{mode: 'cors'})
.then(function(response){
assert_unreached('fetch should fail.');
}, function(){
t.done();
})
.catch(unreached_rejection(t));
}, 'Fetch test for default-src');
async_test(function(t) {
var REDIRECT_URL = host_info.HTTPS_ORIGIN +
base_path() + 'redirect.py?Redirect=';
var OTHER_BASE_URL = host_info.HTTPS_REMOTE_ORIGIN +
base_path() + 'fetch-access-control.py?'
fetch(REDIRECT_URL + encodeURIComponent(OTHER_BASE_URL + 'ACAOrigin=*'),
{mode: 'cors'})
.then(function(response){
assert_unreached('Redirected fetch should fail.');
}, function(){
t.done();
})
.catch(unreached_rejection(t));
}, 'Redirected fetch test for default-src');'''
bodyScript = '''
importScripts('worker-testharness.js');
importScripts('test-helpers.sub.js');
importScripts('/common/get-host-info.sub.js');
var host_info = get_host_info();
test(function() {
var import_script_failed = false;
try {
importScripts(host_info.HTTPS_REMOTE_ORIGIN +
base_path() + 'empty.js');
} catch(e) {
import_script_failed = true;
}
assert_true(import_script_failed,
'Importing the other origins script should fail.');
}, 'importScripts test for script-src');
test(function() {
assert_throws(EvalError(),
function() { eval('1 + 1'); },
'eval() should throw EvalError.')
assert_throws(EvalError(),
function() { new Function('1 + 1'); },
'new Function() should throw EvalError.')
}, 'eval test for script-src');
async_test(function(t) {
fetch(host_info.HTTPS_REMOTE_ORIGIN +
base_path() + 'fetch-access-control.py?ACAOrigin=*',
{mode: 'cors'})
.then(function(response){
t.done();
}, function(){
assert_unreached('fetch should not fail.');
})
.catch(unreached_rejection(t));
}, 'Fetch test for script-src');
async_test(function(t) {
var REDIRECT_URL = host_info.HTTPS_ORIGIN +
base_path() + 'redirect.py?Redirect=';
var OTHER_BASE_URL = host_info.HTTPS_REMOTE_ORIGIN +
base_path() + 'fetch-access-control.py?'
fetch(REDIRECT_URL + encodeURIComponent(OTHER_BASE_URL + 'ACAOrigin=*'),
{mode: 'cors'})
.then(function(response){
t.done();
}, function(){
assert_unreached('Redirected fetch should not fail.');
})
.catch(unreached_rejection(t));
}, 'Redirected fetch test for script-src');'''
bodyConnect = '''
importScripts('worker-testharness.js');
importScripts('test-helpers.sub.js');
importScripts('/common/get-host-info.sub.js');
var host_info = get_host_info();
test(function() {
var import_script_failed = false;
try {
importScripts(host_info.HTTPS_REMOTE_ORIGIN +
base_path() + 'empty.js');
} catch(e) {
import_script_failed = true;
}
assert_false(import_script_failed,
'Importing the other origins script should not fail.');
}, 'importScripts test for connect-src');
test(function() {
var eval_failed = false;
try {
eval('1 + 1');
new Function('1 + 1');
} catch(e) {
eval_failed = true;
}
assert_false(eval_failed,
'connect-src without unsafe-eval should not block eval().');
}, 'eval test for connect-src');
async_test(function(t) {
fetch(host_info.HTTPS_REMOTE_ORIGIN +
base_path() + 'fetch-access-control.py?ACAOrigin=*',
{mode: 'cors'})
.then(function(response){
assert_unreached('fetch should fail.');
}, function(){
t.done();
})
.catch(unreached_rejection(t));
}, 'Fetch test for connect-src');
async_test(function(t) {
var REDIRECT_URL = host_info.HTTPS_ORIGIN +
base_path() + 'redirect.py?Redirect=';
var OTHER_BASE_URL = host_info.HTTPS_REMOTE_ORIGIN +
base_path() + 'fetch-access-control.py?'
fetch(REDIRECT_URL + encodeURIComponent(OTHER_BASE_URL + 'ACAOrigin=*'),
{mode: 'cors'})
.then(function(response){
assert_unreached('Redirected fetch should fail.');
}, function(){
t.done();
})
.catch(unreached_rejection(t));
}, 'Redirected fetch test for connect-src');'''
def main(request, response):
    """wptserve handler: emit a service worker script under a chosen CSP.

    The 'directive' query parameter selects which Content-Security-Policy
    header (and matching test body) is served; anything else yields an
    error body with no CSP header.
    """
    headers = [('Content-Type', 'application/javascript')]
    directive = request.GET['directive']

    # Branches are kept lazy on purpose: only the selected body's module
    # name is ever evaluated.
    if directive == 'default':
        policy, body = "default-src 'self'", bodyDefault
    elif directive == 'script':
        policy, body = "script-src 'self'", bodyScript
    elif directive == 'connect':
        policy, body = "connect-src 'self'", bodyConnect
    else:
        return headers, 'ERROR: Unknown directive'

    headers.append(('Content-Security-Policy', policy))
    return headers, body
| bsd-3-clause |
# encoding: utf-8
from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import (
int_or_none,
js_to_json,
)
class KrasViewIE(InfoExtractor):
    """Extractor for krasview.ru video and embed pages."""
    IE_DESC = 'Красвью'
    _VALID_URL = r'https?://krasview\.ru/(?:video|embed)/(?P<id>\d+)'

    _TEST = {
        'url': 'http://krasview.ru/video/512228',
        'md5': '3b91003cf85fc5db277870c8ebd98eae',
        'info_dict': {
            'id': '512228',
            'ext': 'mp4',
            'title': 'Снег, лёд, заносы',
            'description': 'Снято в городе Нягань, в Ханты-Мансийском автономном округе.',
            'duration': 27,
            'thumbnail': 're:^https?://.*\.jpg',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        page = self._download_webpage(url, video_id)

        # The player is initialised with a JS object literal; normalise it
        # to JSON before parsing.
        flashvars = json.loads(js_to_json(self._search_regex(
            r'video_Init\(({.+?})', page, 'flashvars')))

        info = {
            'id': video_id,
            'url': flashvars['url'],
            'title': self._og_search_title(page),
            'description': self._og_search_description(page, default=None),
            'thumbnail': flashvars.get('image') or self._og_search_thumbnail(page),
            'duration': int_or_none(flashvars.get('duration')),
        }
        # Dimensions come from the Open Graph video tags when present.
        for og_name, key in (('video:width', 'width'), ('video:height', 'height')):
            info[key] = int_or_none(self._og_search_property(
                og_name, page, 'video %s' % key, default=None))
        return info
| unlicense |
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for configuring Glance
"""
import logging
import logging.config
import logging.handlers
import os
import sys
from oslo.config import cfg
from paste import deploy
from glance.version import version_info as version
# Options controlling which paste pipeline/config is loaded for the service.
paste_deploy_opts = [
    cfg.StrOpt('flavor',
               help=_('Partial name of a pipeline in your paste configuration '
                      'file with the service name removed. For example, if '
                      'your paste section name is '
                      '[pipeline:glance-api-keystone] use the value '
                      '"keystone"')),
    cfg.StrOpt('config_file',
               help=_('Name of the paste configuration file.')),
]
# Options shared by every Glance service.
common_opts = [
    cfg.BoolOpt('allow_additional_image_properties', default=True,
                help=_('Whether to allow users to specify image properties '
                       'beyond what the image schema provides')),
    cfg.StrOpt('data_api', default='glance.db.sqlalchemy.api',
               help=_('Python module path of data access API')),
    cfg.IntOpt('limit_param_default', default=25,
               help=_('Default value for the number of items returned by a '
                      'request if not specified explicitly in the request')),
    cfg.IntOpt('api_limit_max', default=1000,
               help=_('Maximum permissible number of items that could be '
                      'returned by a request')),
    cfg.BoolOpt('show_image_direct_url', default=False,
                help=_('Whether to include the backend image storage location '
                       'in image properties. Revealing storage location can be a '
                       'security risk, so use this setting with caution!')),
    cfg.BoolOpt('show_multiple_locations', default=False,
                help=_('Whether to include the backend image locations '
                       'in image properties. Revealing storage location can '
                       'be a security risk, so use this setting with '
                       'caution! The overrides show_image_direct_url.')),
    cfg.IntOpt('image_size_cap', default=1099511627776,
               help=_("Maximum size of image a user can upload in bytes. "
                      "Defaults to 1099511627776 bytes (1 TB).")),
    cfg.IntOpt('user_storage_quota', default=0,
               help=_("Set a system wide quota for every user. This value is "
                      "the total number of bytes that a user can use across "
                      "all storage systems. A value of 0 means unlimited.")),
    cfg.BoolOpt('enable_v1_api', default=True,
                help=_("Deploy the v1 OpenStack Images API. ")),
    cfg.BoolOpt('enable_v2_api', default=True,
                help=_("Deploy the v2 OpenStack Images API. ")),
    cfg.StrOpt('pydev_worker_debug_host', default=None,
               help=_('The hostname/IP of the pydev process listening for '
                      'debug connections')),
    cfg.IntOpt('pydev_worker_debug_port', default=5678,
               help=_('The port on which a pydev process is listening for '
                      'connections.')),
    cfg.StrOpt('metadata_encryption_key', secret=True,
               help=_('Key used for encrypting sensitive metadata while '
                      'talking to the registry or database.')),
]
# Global configuration object shared by all Glance services.
CONF = cfg.CONF
CONF.register_opts(paste_deploy_opts, group='paste_deploy')
CONF.register_opts(common_opts)
# The logging options are declared by the common OpenStack logging module;
# import them here so setup_logging() below can read them from CONF.
CONF.import_opt('verbose', 'glance.openstack.common.log')
CONF.import_opt('debug', 'glance.openstack.common.log')
CONF.import_opt('log_dir', 'glance.openstack.common.log')
CONF.import_opt('log_file', 'glance.openstack.common.log')
CONF.import_opt('log_config', 'glance.openstack.common.log')
CONF.import_opt('log_format', 'glance.openstack.common.log')
CONF.import_opt('log_date_format', 'glance.openstack.common.log')
CONF.import_opt('use_syslog', 'glance.openstack.common.log')
CONF.import_opt('syslog_log_facility', 'glance.openstack.common.log')
def parse_args(args=None, usage=None, default_config_files=None):
    """Parse command line and config file arguments into the global CONF."""
    options = {
        'project': 'glance',
        'version': version.cached_version_string(),
        'usage': usage,
        'default_config_files': default_config_files,
    }
    CONF(args=args, **options)
def parse_cache_args(args=None):
    """Parse arguments for the glance-cache utilities."""
    defaults = cfg.find_config_files(project='glance', prog='glance-cache')
    parse_args(args=args, default_config_files=defaults)
def setup_logging():
    """
    Sets up the logging options for a log with supplied name
    """

    if CONF.log_config:
        # Use a logging configuration file for all settings...
        if os.path.exists(CONF.log_config):
            logging.config.fileConfig(CONF.log_config)
            return
        else:
            raise RuntimeError("Unable to locate specified logging "
                               "config file: %s" % CONF.log_config)
    root_logger = logging.root
    # Verbosity: debug > verbose > warnings-only.
    if CONF.debug:
        root_logger.setLevel(logging.DEBUG)
    elif CONF.verbose:
        root_logger.setLevel(logging.INFO)
    else:
        root_logger.setLevel(logging.WARNING)
    formatter = logging.Formatter(CONF.log_format, CONF.log_date_format)
    # Destination precedence: syslog, then a log file, then stdout.
    if CONF.use_syslog:
        try:
            facility = getattr(logging.handlers.SysLogHandler,
                               CONF.syslog_log_facility)
        except AttributeError:
            raise ValueError(_("Invalid syslog facility"))
        handler = logging.handlers.SysLogHandler(address='/dev/log',
                                                 facility=facility)
    elif CONF.log_file:
        logfile = CONF.log_file
        if CONF.log_dir:
            logfile = os.path.join(CONF.log_dir, logfile)
        handler = logging.handlers.WatchedFileHandler(logfile)
    else:
        handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(formatter)
    root_logger.addHandler(handler)
def _get_deployment_flavor(flavor=None):
"""
Retrieve the paste_deploy.flavor config item, formatted appropriately
for appending to the application name.
:param flavor: if specified, use this setting rather than the
paste_deploy.flavor configuration setting
"""
if not flavor:
flavor = CONF.paste_deploy.flavor
return '' if not flavor else ('-' + flavor)
def _get_paste_config_path():
    """Locate the service's '-paste.ini' file via CONF.find_file."""
    paste_suffix = '-paste.ini'
    if CONF.config_file:
        # Assume paste config is in a paste.ini file corresponding
        # to the last config file
        candidate = CONF.config_file[-1].replace('.conf', paste_suffix)
    else:
        candidate = CONF.prog + paste_suffix
    return CONF.find_file(os.path.basename(candidate))
def _get_deployment_config_file():
    """
    Retrieve the deployment_config_file config item, formatted as an
    absolute pathname.
    """
    path = CONF.paste_deploy.config_file or _get_paste_config_path()
    if not path:
        raise RuntimeError(
            "Unable to locate paste config file for %s." % CONF.prog)
    return os.path.abspath(path)
def load_paste_app(app_name, flavor=None, conf_file=None):
    """
    Builds and returns a WSGI app from a paste config file.

    We assume the last config file specified in the supplied ConfigOpts
    object is the paste config file, if conf_file is None.

    :param app_name: name of the application to load
    :param flavor: name of the variant of the application to load
    :param conf_file: path to the paste config file

    :raises RuntimeError when config file cannot be located or application
            cannot be loaded from config file
    """
    # append the deployment flavor to the application name,
    # in order to identify the appropriate paste pipeline
    app_name += _get_deployment_flavor(flavor)

    if not conf_file:
        conf_file = _get_deployment_config_file()

    # Create the logger outside the try block so the error path below can
    # never reference an unbound name.
    logger = logging.getLogger(__name__)
    try:
        logger.debug(_("Loading %(app_name)s from %(conf_file)s"),
                     {'conf_file': conf_file, 'app_name': app_name})

        app = deploy.loadapp("config:%s" % conf_file, name=app_name)

        # Log the options used when starting if we're in debug mode...
        if CONF.debug:
            CONF.log_opt_values(logger, logging.DEBUG)

        return app
    except (LookupError, ImportError) as e:
        # Format with an explicit mapping instead of the former fragile
        # "% locals()", which silently depends on local variable names.
        msg = (_("Unable to load %(app_name)s from "
                 "configuration file %(conf_file)s."
                 "\nGot: %(e)r") %
               {'app_name': app_name, 'conf_file': conf_file, 'e': e})
        logger.error(msg)
        raise RuntimeError(msg)
| apache-2.0 |
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import itertools
import os
import re
import sys
import urlparse
import xml.dom.minidom
from git_config import GitConfig
from git_refs import R_HEADS, HEAD
from project import RemoteSpec, Project, MetaProject
from error import ManifestParseError
MANIFEST_FILE_NAME = 'manifest.xml'
LOCAL_MANIFEST_NAME = 'local_manifest.xml'
LOCAL_MANIFESTS_DIR_NAME = 'local_manifests'
urlparse.uses_relative.extend(['ssh', 'git'])
urlparse.uses_netloc.extend(['ssh', 'git'])
class _Default(object):
  """Project defaults within the manifest."""
  revisionExpr = None  # default revision expression for projects
  remote = None        # default remote (_XmlRemote) for projects
  sync_j = 1           # default number of parallel sync jobs
  sync_c = False       # sync only the current branch by default
  sync_s = False       # sync git submodules as nested projects by default
class _XmlRemote(object):
  """A <remote> manifest element: where project repositories are fetched."""

  def __init__(self,
               name,
               alias=None,
               fetch=None,
               manifestUrl=None,
               review=None):
    self.name = name
    self.fetchUrl = fetch
    self.manifestUrl = manifestUrl
    self.remoteAlias = alias
    self.reviewUrl = review
    self.resolvedFetchUrl = self._resolveFetchUrl()

  def __eq__(self, other):
    return self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not self.__eq__(other)

  def _resolveFetchUrl(self):
    """Resolve a possibly-relative fetch URL against the manifest URL."""
    fetch = self.fetchUrl.rstrip('/')
    base = self.manifestUrl.rstrip('/')
    # urljoin will get confused if there is no scheme in the base url
    # ie, if base is of the form <hostname:port>.  Graft on a throwaway
    # scheme so the join works, then strip it back off.
    if base.find(':') != base.find('/') - 1:
      base = 'gopher://' + base
    resolved = urlparse.urljoin(base, fetch)
    return re.sub(r'^gopher://', '', resolved)

  def ToRemoteSpec(self, projectName):
    """Build the RemoteSpec used to fetch projectName from this remote."""
    fetch_url = '%s/%s' % (self.resolvedFetchUrl.rstrip('/'), projectName)
    return RemoteSpec(self.remoteAlias or self.name, fetch_url, self.reviewUrl)
class XmlManifest(object):
"""manages the repo configuration file"""
def __init__(self, repodir):
self.repodir = os.path.abspath(repodir)
self.topdir = os.path.dirname(self.repodir)
self.manifestFile = os.path.join(self.repodir, MANIFEST_FILE_NAME)
self.globalConfig = GitConfig.ForUser()
self.repoProject = MetaProject(self, 'repo',
gitdir = os.path.join(repodir, 'repo/.git'),
worktree = os.path.join(repodir, 'repo'))
self.manifestProject = MetaProject(self, 'manifests',
gitdir = os.path.join(repodir, 'manifests.git'),
worktree = os.path.join(repodir, 'manifests'))
self._Unload()
def Override(self, name):
"""Use a different manifest, just for the current instantiation.
"""
path = os.path.join(self.manifestProject.worktree, name)
if not os.path.isfile(path):
raise ManifestParseError('manifest %s not found' % name)
old = self.manifestFile
try:
self.manifestFile = path
self._Unload()
self._Load()
finally:
self.manifestFile = old
def Link(self, name):
"""Update the repo metadata to use a different manifest.
"""
self.Override(name)
try:
if os.path.exists(self.manifestFile):
os.remove(self.manifestFile)
os.symlink('manifests/%s' % name, self.manifestFile)
except OSError:
raise ManifestParseError('cannot link manifest %s' % name)
def _RemoteToXml(self, r, doc, root):
e = doc.createElement('remote')
root.appendChild(e)
e.setAttribute('name', r.name)
e.setAttribute('fetch', r.fetchUrl)
if r.reviewUrl is not None:
e.setAttribute('review', r.reviewUrl)
  def Save(self, fd, peg_rev=False, peg_rev_upstream=True):
    """Write the current manifest out to the given file descriptor.

    :param fd: writable file-like object receiving the manifest XML.
    :param peg_rev: if True, pin each project to its current commit rather
                    than its symbolic revision expression.
    :param peg_rev_upstream: when pegging, also record the original
                             revision expression as an 'upstream' attribute.
    """
    mp = self.manifestProject
    groups = mp.config.GetString('manifest.groups')
    if not groups:
      groups = 'all'
    groups = [x for x in re.split(r'[,\s]+', groups) if x]
    doc = xml.dom.minidom.Document()
    root = doc.createElement('manifest')
    doc.appendChild(root)
    # Save out the notice. There's a little bit of work here to give it the
    # right whitespace, which assumes that the notice is automatically indented
    # by 4 by minidom.
    if self.notice:
      notice_element = root.appendChild(doc.createElement('notice'))
      notice_lines = self.notice.splitlines()
      indented_notice = ('\n'.join(" "*4 + line for line in notice_lines))[4:]
      notice_element.appendChild(doc.createTextNode(indented_notice))
    d = self.default
    # Remotes are emitted in sorted order for a stable output.
    sort_remotes = list(self.remotes.keys())
    sort_remotes.sort()
    for r in sort_remotes:
      self._RemoteToXml(self.remotes[r], doc, root)
    if self.remotes:
      root.appendChild(doc.createTextNode(''))
    # Only emit a <default> element if at least one attribute is non-trivial.
    have_default = False
    e = doc.createElement('default')
    if d.remote:
      have_default = True
      e.setAttribute('remote', d.remote.name)
    if d.revisionExpr:
      have_default = True
      e.setAttribute('revision', d.revisionExpr)
    if d.sync_j > 1:
      have_default = True
      e.setAttribute('sync-j', '%d' % d.sync_j)
    if d.sync_c:
      have_default = True
      e.setAttribute('sync-c', 'true')
    if d.sync_s:
      have_default = True
      e.setAttribute('sync-s', 'true')
    if have_default:
      root.appendChild(e)
      root.appendChild(doc.createTextNode(''))
    if self._manifest_server:
      e = doc.createElement('manifest-server')
      e.setAttribute('url', self._manifest_server)
      root.appendChild(e)
      root.appendChild(doc.createTextNode(''))
    def output_projects(parent, parent_node, projects):
      # Emit each named project (and, recursively, its subprojects).
      for p in projects:
        output_project(parent, parent_node, self.projects[p])
    def output_project(parent, parent_node, p):
      if not p.MatchesGroups(groups):
        return
      name = p.name
      relpath = p.relpath
      if parent:
        name = self._UnjoinName(parent.name, name)
        relpath = self._UnjoinRelpath(parent.relpath, relpath)
      e = doc.createElement('project')
      parent_node.appendChild(e)
      e.setAttribute('name', name)
      if relpath != name:
        e.setAttribute('path', relpath)
      if not d.remote or p.remote.name != d.remote.name:
        e.setAttribute('remote', p.remote.name)
      if peg_rev:
        if self.IsMirror:
          value = p.bare_git.rev_parse(p.revisionExpr + '^0')
        else:
          value = p.work_git.rev_parse(HEAD + '^0')
        e.setAttribute('revision', value)
        if peg_rev_upstream and value != p.revisionExpr:
          # Only save the origin if the origin is not a sha1, and the default
          # isn't our value, and the if the default doesn't already have that
          # covered.
          e.setAttribute('upstream', p.revisionExpr)
      elif not d.revisionExpr or p.revisionExpr != d.revisionExpr:
        e.setAttribute('revision', p.revisionExpr)
      for c in p.copyfiles:
        ce = doc.createElement('copyfile')
        ce.setAttribute('src', c.src)
        ce.setAttribute('dest', c.dest)
        e.appendChild(ce)
      # Groups implied by the project itself are not written back out.
      default_groups = ['all', 'name:%s' % p.name, 'path:%s' % p.relpath]
      egroups = [g for g in p.groups if g not in default_groups]
      if egroups:
        e.setAttribute('groups', ','.join(egroups))
      for a in p.annotations:
        if a.keep == "true":
          ae = doc.createElement('annotation')
          ae.setAttribute('name', a.name)
          ae.setAttribute('value', a.value)
          e.appendChild(ae)
      if p.sync_c:
        e.setAttribute('sync-c', 'true')
      if p.sync_s:
        e.setAttribute('sync-s', 'true')
      if p.subprojects:
        sort_projects = [subp.name for subp in p.subprojects]
        sort_projects.sort()
        output_projects(p, e, sort_projects)
    # Only top-level projects are emitted here; subprojects are nested.
    sort_projects = [key for key in self.projects.keys()
                     if not self.projects[key].parent]
    sort_projects.sort()
    output_projects(None, root, sort_projects)
    if self._repo_hooks_project:
      root.appendChild(doc.createTextNode(''))
      e = doc.createElement('repo-hooks')
      e.setAttribute('in-project', self._repo_hooks_project.name)
      e.setAttribute('enabled-list',
                     ' '.join(self._repo_hooks_project.enabled_repo_hooks))
      root.appendChild(e)
    doc.writexml(fd, '', '  ', '\n', 'UTF-8')
  @property
  def projects(self):
    # Manifest is parsed lazily on first access.
    self._Load()
    return self._projects
  @property
  def remotes(self):
    # Mapping of remote name -> _XmlRemote, parsed lazily.
    self._Load()
    return self._remotes
  @property
  def default(self):
    # The _Default parsed from <default/>, or an empty _Default.
    self._Load()
    return self._default
  @property
  def repo_hooks_project(self):
    # Project named by <repo-hooks in-project=...>, or None.
    self._Load()
    return self._repo_hooks_project
  @property
  def notice(self):
    # Text of the <notice> element, or None.
    self._Load()
    return self._notice
  @property
  def manifest_server(self):
    # URL from <manifest-server url=...>, or None.
    self._Load()
    return self._manifest_server
  @property
  def IsMirror(self):
    # True when this client was initialized as a mirror (repo init --mirror).
    return self.manifestProject.config.GetBoolean('repo.mirror')
  def _Unload(self):
    """Drop all parsed state so the next property access reparses."""
    self._loaded = False
    self._projects = {}
    self._remotes = {}
    self._default = None
    self._repo_hooks_project = None
    self._notice = None
    self.branch = None
    self._manifest_server = None
def _Load(self):
if not self._loaded:
m = self.manifestProject
b = m.GetBranch(m.CurrentBranch).merge
if b is not None and b.startswith(R_HEADS):
b = b[len(R_HEADS):]
self.branch = b
nodes = []
nodes.append(self._ParseManifestXml(self.manifestFile,
self.manifestProject.worktree))
local = os.path.join(self.repodir, LOCAL_MANIFEST_NAME)
if os.path.exists(local):
print('warning: %s is deprecated; put local manifests in %s instead'
% (LOCAL_MANIFEST_NAME, LOCAL_MANIFESTS_DIR_NAME),
file=sys.stderr)
nodes.append(self._ParseManifestXml(local, self.repodir))
local_dir = os.path.abspath(os.path.join(self.repodir, LOCAL_MANIFESTS_DIR_NAME))
try:
for local_file in sorted(os.listdir(local_dir)):
if local_file.endswith('.xml'):
try:
nodes.append(self._ParseManifestXml(local_file, self.repodir))
except ManifestParseError as e:
print('%s' % str(e), file=sys.stderr)
except OSError:
pass
self._ParseManifest(nodes)
if self.IsMirror:
self._AddMetaProjectMirror(self.repoProject)
self._AddMetaProjectMirror(self.manifestProject)
self._loaded = True
def _ParseManifestXml(self, path, include_root):
try:
root = xml.dom.minidom.parse(path)
except (OSError, xml.parsers.expat.ExpatError) as e:
raise ManifestParseError("error parsing manifest %s: %s" % (path, e))
if not root or not root.childNodes:
raise ManifestParseError("no root node in %s" % (path,))
for manifest in root.childNodes:
if manifest.nodeName == 'manifest':
break
else:
raise ManifestParseError("no <manifest> in %s" % (path,))
nodes = []
for node in manifest.childNodes: # pylint:disable=W0631
# We only get here if manifest is initialised
if node.nodeName == 'include':
name = self._reqatt(node, 'name')
fp = os.path.join(include_root, name)
if not os.path.isfile(fp):
raise ManifestParseError, \
"include %s doesn't exist or isn't a file" % \
(name,)
try:
nodes.extend(self._ParseManifestXml(fp, include_root))
# should isolate this to the exact exception, but that's
# tricky. actual parsing implementation may vary.
except (KeyboardInterrupt, RuntimeError, SystemExit):
raise
except Exception as e:
raise ManifestParseError(
"failed parsing included manifest %s: %s", (name, e))
else:
nodes.append(node)
return nodes
  def _ParseManifest(self, node_list):
    """Merge the supplied lists of DOM nodes into this manifest's state.

    Elements are handled in passes (remotes, default, notice,
    manifest-server, then projects / repo-hooks / remove-project) so that
    later elements may reference earlier ones regardless of file order.
    """
    for node in itertools.chain(*node_list):
      if node.nodeName == 'remote':
        remote = self._ParseRemote(node)
        if remote:
          if remote.name in self._remotes:
            # A remote may be repeated only if it is byte-for-byte identical.
            if remote != self._remotes[remote.name]:
              raise ManifestParseError(
                  'remote %s already exists with different attributes' %
                  (remote.name))
          else:
            self._remotes[remote.name] = remote
    for node in itertools.chain(*node_list):
      if node.nodeName == 'default':
        if self._default is not None:
          raise ManifestParseError(
              'duplicate default in %s' %
              (self.manifestFile))
        self._default = self._ParseDefault(node)
    if self._default is None:
      self._default = _Default()
    for node in itertools.chain(*node_list):
      if node.nodeName == 'notice':
        if self._notice is not None:
          raise ManifestParseError(
              'duplicate notice in %s' %
              (self.manifestFile))
        self._notice = self._ParseNotice(node)
    for node in itertools.chain(*node_list):
      if node.nodeName == 'manifest-server':
        url = self._reqatt(node, 'url')
        if self._manifest_server is not None:
          raise ManifestParseError(
              'duplicate manifest-server in %s' %
              (self.manifestFile))
        self._manifest_server = url
    def recursively_add_projects(project):
      # Register a project and all of its nested subprojects by name.
      if self._projects.get(project.name):
        raise ManifestParseError(
            'duplicate project %s in %s' %
            (project.name, self.manifestFile))
      self._projects[project.name] = project
      for subproject in project.subprojects:
        recursively_add_projects(subproject)
    for node in itertools.chain(*node_list):
      if node.nodeName == 'project':
        project = self._ParseProject(node)
        recursively_add_projects(project)
      if node.nodeName == 'repo-hooks':
        # Get the name of the project and the (space-separated) list of enabled.
        repo_hooks_project = self._reqatt(node, 'in-project')
        enabled_repo_hooks = self._reqatt(node, 'enabled-list').split()
        # Only one project can be the hooks project
        if self._repo_hooks_project is not None:
          raise ManifestParseError(
              'duplicate repo-hooks in %s' %
              (self.manifestFile))
        # Store a reference to the Project.
        try:
          self._repo_hooks_project = self._projects[repo_hooks_project]
        except KeyError:
          raise ManifestParseError(
              'project %s not found for repo-hooks' %
              (repo_hooks_project))
        # Store the enabled hooks in the Project object.
        self._repo_hooks_project.enabled_repo_hooks = enabled_repo_hooks
      if node.nodeName == 'remove-project':
        name = self._reqatt(node, 'name')
        try:
          del self._projects[name]
        except KeyError:
          raise ManifestParseError('remove-project element specifies non-existent '
                                   'project: %s' % name)
        # If the manifest removes the hooks project, treat it as if it deleted
        # the repo-hooks element too.
        if self._repo_hooks_project and (self._repo_hooks_project.name == name):
          self._repo_hooks_project = None
def _AddMetaProjectMirror(self, m):
name = None
m_url = m.GetRemote(m.remote.name).url
if m_url.endswith('/.git'):
raise ManifestParseError, 'refusing to mirror %s' % m_url
if self._default and self._default.remote:
url = self._default.remote.resolvedFetchUrl
if not url.endswith('/'):
url += '/'
if m_url.startswith(url):
remote = self._default.remote
name = m_url[len(url):]
if name is None:
s = m_url.rindex('/') + 1
manifestUrl = self.manifestProject.config.GetString('remote.origin.url')
remote = _XmlRemote('origin', fetch=m_url[:s], manifestUrl=manifestUrl)
name = m_url[s:]
if name.endswith('.git'):
name = name[:-4]
if name not in self._projects:
m.PreSync()
gitdir = os.path.join(self.topdir, '%s.git' % name)
project = Project(manifest = self,
name = name,
remote = remote.ToRemoteSpec(name),
gitdir = gitdir,
worktree = None,
relpath = None,
revisionExpr = m.revisionExpr,
revisionId = None)
self._projects[project.name] = project
def _ParseRemote(self, node):
"""
reads a <remote> element from the manifest file
"""
name = self._reqatt(node, 'name')
alias = node.getAttribute('alias')
if alias == '':
alias = None
fetch = self._reqatt(node, 'fetch')
review = node.getAttribute('review')
if review == '':
review = None
manifestUrl = self.manifestProject.config.GetString('remote.origin.url')
return _XmlRemote(name, alias, fetch, manifestUrl, review)
def _ParseDefault(self, node):
    """
    reads a <default> element from the manifest file
    """
    def attr_bool(attr_name):
        # Absent/empty attributes are False; otherwise accept yes/true/1.
        raw = node.getAttribute(attr_name)
        return bool(raw) and raw.lower() in ("yes", "true", "1")

    d = _Default()
    d.remote = self._get_remote(node)
    d.revisionExpr = node.getAttribute('revision') or None
    sync_j = node.getAttribute('sync-j')
    d.sync_j = int(sync_j) if sync_j else 1
    d.sync_c = attr_bool('sync-c')
    d.sync_s = attr_bool('sync-s')
    return d
def _ParseNotice(self, node):
"""
reads a <notice> element from the manifest file
The <notice> element is distinct from other tags in the XML in that the
data is conveyed between the start and end tag (it's not an empty-element
tag).
The white space (carriage returns, indentation) for the notice element is
relevant and is parsed in a way that is based on how python docstrings work.
In fact, the code is remarkably similar to here:
http://www.python.org/dev/peps/pep-0257/
"""
# Get the data out of the node...
notice = node.childNodes[0].data
# Figure out minimum indentation, skipping the first line (the same line
# as the <notice> tag)...
minIndent = sys.maxint
lines = notice.splitlines()
for line in lines[1:]:
lstrippedLine = line.lstrip()
if lstrippedLine:
indent = len(line) - len(lstrippedLine)
minIndent = min(indent, minIndent)
# Strip leading / trailing blank lines and also indentation.
cleanLines = [lines[0].strip()]
for line in lines[1:]:
cleanLines.append(line[minIndent:].rstrip())
# Clear completely blank lines from front and back...
while cleanLines and not cleanLines[0]:
del cleanLines[0]
while cleanLines and not cleanLines[-1]:
del cleanLines[-1]
return '\n'.join(cleanLines)
def _JoinName(self, parent_name, name):
return os.path.join(parent_name, name)
def _UnjoinName(self, parent_name, name):
return os.path.relpath(name, parent_name)
def _ParseProject(self, node, parent = None):
    """
    reads a <project> element from the manifest file

    Missing attributes fall back to the manifest's <default> element.  When
    *parent* is given (nested <project> elements), the name and paths are
    scoped under the parent's tree.  Raises ManifestParseError when no
    remote or revision can be resolved, or when the path is absolute.
    """
    name = self._reqatt(node, 'name')
    if parent:
        # Nested projects are named relative to their parent.
        name = self._JoinName(parent.name, name)

    remote = self._get_remote(node)
    if remote is None:
        remote = self._default.remote
    if remote is None:
        raise ManifestParseError, \
              "no remote for project %s within %s" % \
              (name, self.manifestFile)

    revisionExpr = node.getAttribute('revision')
    if not revisionExpr:
        revisionExpr = self._default.revisionExpr
    if not revisionExpr:
        raise ManifestParseError, \
              "no revision for project %s within %s" % \
              (name, self.manifestFile)

    # The checkout path defaults to the project name.
    path = node.getAttribute('path')
    if not path:
        path = name
    if path.startswith('/'):
        raise ManifestParseError, \
              "project %s path cannot be absolute in %s" % \
              (name, self.manifestFile)

    # Boolean attributes: absent means the documented default; otherwise
    # any of yes/true/1 (case-insensitive) is True.
    rebase = node.getAttribute('rebase')
    if not rebase:
        rebase = True
    else:
        rebase = rebase.lower() in ("yes", "true", "1")

    sync_c = node.getAttribute('sync-c')
    if not sync_c:
        sync_c = False
    else:
        sync_c = sync_c.lower() in ("yes", "true", "1")

    sync_s = node.getAttribute('sync-s')
    if not sync_s:
        sync_s = self._default.sync_s
    else:
        sync_s = sync_s.lower() in ("yes", "true", "1")

    upstream = node.getAttribute('upstream')

    groups = ''
    if node.hasAttribute('groups'):
        groups = node.getAttribute('groups')
    # Split on commas and/or whitespace, dropping empty entries.
    groups = [x for x in re.split(r'[,\s]+', groups) if x]

    if parent is None:
        relpath, worktree, gitdir = self.GetProjectPaths(name, path)
    else:
        relpath, worktree, gitdir = self.GetSubprojectPaths(parent, path)

    # Every project implicitly belongs to 'all' plus its name/path groups.
    default_groups = ['all', 'name:%s' % name, 'path:%s' % relpath]
    groups.extend(set(default_groups).difference(groups))

    project = Project(manifest = self,
                      name = name,
                      remote = remote.ToRemoteSpec(name),
                      gitdir = gitdir,
                      worktree = worktree,
                      relpath = relpath,
                      revisionExpr = revisionExpr,
                      revisionId = None,
                      rebase = rebase,
                      groups = groups,
                      sync_c = sync_c,
                      sync_s = sync_s,
                      upstream = upstream,
                      parent = parent)

    # Child elements: file copies, annotations, and nested subprojects.
    for n in node.childNodes:
        if n.nodeName == 'copyfile':
            self._ParseCopyFile(project, n)
        if n.nodeName == 'annotation':
            self._ParseAnnotation(project, n)
        if n.nodeName == 'project':
            project.subprojects.append(self._ParseProject(n, parent = project))

    return project
def GetProjectPaths(self, name, path):
    """Return (relpath, worktree, gitdir) for a top-level project."""
    relpath = path
    if self.IsMirror:
        # Mirrors keep bare repositories directly under the top directory
        # and have no working tree.
        return relpath, None, os.path.join(self.topdir, '%s.git' % name)
    worktree = os.path.join(self.topdir, path).replace('\\', '/')
    gitdir = os.path.join(self.repodir, 'projects', '%s.git' % path)
    return relpath, worktree, gitdir
def GetSubprojectName(self, parent, submodule_path):
    # Submodule names are scoped under the parent project's name.
    full_name = os.path.join(parent.name, submodule_path)
    return full_name
def _JoinRelpath(self, parent_relpath, relpath):
return os.path.join(parent_relpath, relpath)
def _UnjoinRelpath(self, parent_relpath, relpath):
return os.path.relpath(relpath, parent_relpath)
def GetSubprojectPaths(self, parent, path):
    """Return (relpath, worktree, gitdir) for a subproject of *parent*."""
    relpath = self._JoinRelpath(parent.relpath, path)
    gitdir = os.path.join(parent.gitdir, 'subprojects', '%s.git' % path)
    worktree = None
    if not self.IsMirror:
        # Non-mirror checkouts nest the worktree under the parent's.
        worktree = os.path.join(parent.worktree, path).replace('\\', '/')
    return relpath, worktree, gitdir
def _ParseCopyFile(self, project, node):
src = self._reqatt(node, 'src')
dest = self._reqatt(node, 'dest')
if not self.IsMirror:
# src is project relative;
# dest is relative to the top of the tree
project.AddCopyFile(src, dest, os.path.join(self.topdir, dest))
def _ParseAnnotation(self, project, node):
name = self._reqatt(node, 'name')
value = self._reqatt(node, 'value')
try:
keep = self._reqatt(node, 'keep').lower()
except ManifestParseError:
keep = "true"
if keep != "true" and keep != "false":
raise ManifestParseError, "optional \"keep\" attribute must be \"true\" or \"false\""
project.AddAnnotation(name, value, keep)
def _get_remote(self, node):
name = node.getAttribute('remote')
if not name:
return None
v = self._remotes.get(name)
if not v:
raise ManifestParseError, \
"remote %s not defined in %s" % \
(name, self.manifestFile)
return v
def _reqatt(self, node, attname):
"""
reads a required attribute from the node.
"""
v = node.getAttribute(attname)
if not v:
raise ManifestParseError, \
"no %s in <%s> within %s" % \
(attname, node.nodeName, self.manifestFile)
return v
| apache-2.0 |
yasoob/PythonRSSReader | venv/lib/python2.7/dist-packages/ibus/interface/ienginefactory.py | 6 | 2089 | # vim:set et sts=4 sw=4:
#
# ibus - The Input Bus
#
# Copyright (c) 2007-2010 Peng Huang <shawn.p.huang@gmail.com>
# Copyright (c) 2007-2010 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
__all__ = ("IEngineFactory", )
import dbus.service
from ibus.common import \
IBUS_IFACE_ENGINE_FACTORY
class IEngineFactory(dbus.service.Object):
    """D-Bus interface skeleton for an IBus engine factory.

    The `method`/`async_method` class attributes are decorator factories
    that export the methods below on the IBUS_IFACE_ENGINE_FACTORY D-Bus
    interface; subclasses provide the actual implementations.
    """

    # define method decorator.
    method = lambda **args: \
        dbus.service.method(dbus_interface=IBUS_IFACE_ENGINE_FACTORY, \
                            **args)

    # define async method decorator; the callee receives reply_cb/error_cb
    # callbacks instead of returning synchronously.
    async_method = lambda **args: \
        dbus.service.method(dbus_interface=IBUS_IFACE_ENGINE_FACTORY, \
                            async_callbacks=("reply_cb", "error_cb"), \
                            **args)

    # Return a array. [name, default_language, icon_path, authors, credits]
    @method(out_signature="as")
    def GetInfo(self): pass

    # Factory should allocate all resources in this method
    @method()
    def Initialize(self): pass

    # Factory should free all allocated resources in this method
    @method()
    def Uninitialize(self): pass

    # Create an input context and return the id of the context.
    # If failed, it will return "" or None.
    @method(in_signature="s", out_signature="o")
    def CreateEngine(self, engine_name): pass

    # Destroy the engine
    @method()
    def Destroy(self): pass
| mit |
bourdakos1/Fetch | requests/packages/urllib3/contrib/ntlmpool.py | 199 | 4546 | """
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
from __future__ import absolute_import
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
    """
    Implements an NTLM authentication version of an urllib3 connection pool
    """

    scheme = 'https'

    def __init__(self, user, pw, authurl, *args, **kwargs):
        """
        authurl is a random URL on the server that is protected by NTLM.
        user is the Windows user, probably in the DOMAIN\\username format.
        pw is the password for the user.
        """
        super(NTLMConnectionPool, self).__init__(*args, **kwargs)
        self.authurl = authurl
        self.rawuser = user
        # Split 'DOMAIN\username' once; the domain part is upper-cased.
        user_parts = user.split('\\', 1)
        self.domain = user_parts[0].upper()
        self.user = user_parts[1]
        self.pw = pw

    def _new_conn(self):
        """Open an HTTPS connection and perform the 3-message NTLM handshake.

        The authenticated state is tied to the TCP connection, so the socket
        must be kept open (Connection: Keep-Alive) for subsequent requests.
        """
        # Performs the NTLM handshake that secures the connection. The socket
        # must be kept open while requests are performed.
        self.num_connections += 1
        # NOTE(review): %-formatting inside log.debug() runs eagerly; lazy
        # '%s'-style arguments would avoid the work when DEBUG is off.
        log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s' %
                  (self.num_connections, self.host, self.authurl))

        headers = {}
        headers['Connection'] = 'Keep-Alive'
        req_header = 'Authorization'
        resp_header = 'www-authenticate'

        conn = HTTPSConnection(host=self.host, port=self.port)

        # Send negotiation message (NTLM message type 1)
        headers[req_header] = (
            'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
        log.debug('Request headers: %s' % headers)
        conn.request('GET', self.authurl, None, headers)
        res = conn.getresponse()
        reshdr = dict(res.getheaders())
        log.debug('Response status: %s %s' % (res.status, res.reason))
        log.debug('Response headers: %s' % reshdr)
        log.debug('Response data: %s [...]' % res.read(100))

        # Remove the reference to the socket, so that it can not be closed by
        # the response object (we want to keep the socket open)
        res.fp = None

        # Server should respond with a challenge message (NTLM type 2)
        auth_header_values = reshdr[resp_header].split(', ')
        auth_header_value = None
        for s in auth_header_values:
            if s[:5] == 'NTLM ':
                auth_header_value = s[5:]

        if auth_header_value is None:
            raise Exception('Unexpected %s response header: %s' %
                            (resp_header, reshdr[resp_header]))

        # Send authentication message (NTLM type 3)
        ServerChallenge, NegotiateFlags = \
            ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
        auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
                                                         self.user,
                                                         self.domain,
                                                         self.pw,
                                                         NegotiateFlags)
        headers[req_header] = 'NTLM %s' % auth_msg
        log.debug('Request headers: %s' % headers)
        conn.request('GET', self.authurl, None, headers)
        res = conn.getresponse()
        log.debug('Response status: %s %s' % (res.status, res.reason))
        log.debug('Response headers: %s' % dict(res.getheaders()))
        log.debug('Response data: %s [...]' % res.read()[:100])
        if res.status != 200:
            if res.status == 401:
                raise Exception('Server rejected request: wrong '
                                'username or password')
            raise Exception('Wrong server response: %s %s' %
                            (res.status, res.reason))

        res.fp = None
        log.debug('Connection established')
        return conn

    def urlopen(self, method, url, body=None, headers=None, retries=3,
                redirect=True, assert_same_host=True):
        """Issue a request, forcing Keep-Alive so the NTLM session survives."""
        if headers is None:
            headers = {}
        headers['Connection'] = 'Keep-Alive'
        return super(NTLMConnectionPool, self).urlopen(method, url, body,
                                                       headers, retries,
                                                       redirect,
                                                       assert_same_host)
| apache-2.0 |
jackjennings/Mechanic | src/lib/mechanic/ui/lists/settings.py | 6 | 1252 | from vanilla import CheckBoxListCell
from mechanic.storage import Storage
from mechanic.extension import Extension
from mechanic.ui.lists.extension import ExtensionList
from mechanic.ui.formatters.version import VersionFormatter
class SettingsList(ExtensionList):
    """Return an ExtensionList for settings window."""

    # Column layout for the list control: a per-row checkbox controlling
    # update checks, the (read-only) extension name, and its installed
    # version rendered through VersionFormatter.
    columns = [{"title": "Check",
                "key": "check_for_updates",
                "width": 40,
                "editable": True,
                "cell": CheckBoxListCell()},
               {"title": "Extension",
                "key": "name",
                "width": 300,
                "editable": False},
               {"title": "Version",
                "key": "local_version",
                "editable": False,
                "formatter": VersionFormatter.alloc().init()}]

    def __init__(self, posSize, **kwargs):
        # Persist checkbox toggles immediately via save().
        kwargs.update({
            'editCallback': self.save
        })
        # Only list extensions that have a configured update source.
        configured = [e for e in Extension.all() if e.is_configured]
        super(SettingsList, self).__init__(posSize, configured, **kwargs)

    def save(self, sender):
        """Store the names of extensions whose update checks are unchecked."""
        rows = self.get()
        # Unchecked rows are recorded in the persisted 'ignore' mapping.
        ignore = {r["name"]: True for r in rows if not r["check_for_updates"]}
        Storage.set('ignore', ignore)
| mit |
hachard/Cra-Magnet | flask/lib/python3.5/site-packages/sqlparse/filters/reindent.py | 9 | 6939 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Andi Albrecht, albrecht.andi@gmail.com
#
# This module is part of python-sqlparse and is released under
# the BSD License: https://opensource.org/licenses/BSD-3-Clause
from sqlparse import sql, tokens as T
from sqlparse.compat import text_type
from sqlparse.utils import offset, indent
class ReindentFilter(object):
    """Re-indents a parsed SQL statement tree for pretty printing.

    The filter walks each statement's token tree and inserts newline and
    whitespace tokens so that keywords, identifier lists and CASE branches
    line up according to the configured indent width and character.
    """

    def __init__(self, width=2, char=' ', wrap_after=0, n='\n',
                 comma_first=False):
        self.n = n  # newline text inserted before split keywords
        self.width = width  # spaces per indentation level
        self.char = char  # fill character, ' ' or '\t'
        self.indent = 0  # current indentation level
        self.offset = 0  # additional column offset inside the level
        self.wrap_after = wrap_after  # wrap identifier lists after N chars
        self.comma_first = comma_first  # place commas at line starts
        self._curr_stmt = None  # statement currently being processed
        self._last_stmt = None  # previously processed statement

    def _flatten_up_to_token(self, token):
        """Yields all tokens up to token but excluding current."""
        if token.is_group:
            token = next(token.flatten())

        for t in self._curr_stmt.flatten():
            if t == token:
                # Fixed: was `raise StopIteration`, which PEP 479 (Python
                # 3.7+) converts to RuntimeError inside a generator; a plain
                # break ends the iteration identically.
                break
            yield t

    @property
    def leading_ws(self):
        """Width (in chars) of the current indentation plus offset."""
        return self.offset + self.indent * self.width

    def _get_offset(self, token):
        """Return token's column relative to the current leading whitespace."""
        raw = u''.join(map(text_type, self._flatten_up_to_token(token)))
        line = (raw or '\n').splitlines()[-1]

        # Now take current offset into account and return relative offset.
        return len(line) - len(self.char * self.leading_ws)

    def nl(self, offset=0):
        """Build a whitespace token: newline plus current indentation."""
        return sql.Token(
            T.Whitespace,
            self.n + self.char * max(0, self.leading_ws + offset))

    def _next_token(self, tlist, idx=-1):
        """Find the next keyword after idx that should start a new line."""
        split_words = ('FROM', 'STRAIGHT_JOIN$', 'JOIN$', 'AND', 'OR',
                       'GROUP', 'ORDER', 'UNION', 'VALUES',
                       'SET', 'BETWEEN', 'EXCEPT', 'HAVING')
        m_split = T.Keyword, split_words, True
        tidx, token = tlist.token_next_by(m=m_split, idx=idx)

        # 'BETWEEN x AND y' must stay on one line: skip the AND that belongs
        # to the BETWEEN and continue searching after it.
        if token and token.normalized == 'BETWEEN':
            tidx, token = self._next_token(tlist, tidx)

            if token and token.normalized == 'AND':
                tidx, token = self._next_token(tlist, tidx)

        return tidx, token

    def _split_kwds(self, tlist):
        """Insert a newline before each split keyword in tlist."""
        tidx, token = self._next_token(tlist)
        while token:
            pidx, prev_ = tlist.token_prev(tidx, skip_ws=False)
            uprev = text_type(prev_)

            # Replace any preceding whitespace with our own newline token,
            # unless the previous token already ends a line.
            if prev_ and prev_.is_whitespace:
                del tlist.tokens[pidx]
                tidx -= 1

            if not (uprev.endswith('\n') or uprev.endswith('\r')):
                tlist.insert_before(tidx, self.nl())
                tidx += 1

            tidx, token = self._next_token(tlist, tidx)

    def _split_statements(self, tlist):
        """Insert a newline before each DML/DDL keyword in tlist."""
        ttypes = T.Keyword.DML, T.Keyword.DDL
        tidx, token = tlist.token_next_by(t=ttypes)
        while token:
            pidx, prev_ = tlist.token_prev(tidx, skip_ws=False)
            if prev_ and prev_.is_whitespace:
                del tlist.tokens[pidx]
                tidx -= 1
            # only break if it's not the first token
            if prev_:
                tlist.insert_before(tidx, self.nl())
                tidx += 1
            tidx, token = tlist.token_next_by(t=ttypes, idx=tidx)

    def _process(self, tlist):
        """Dispatch to a _process_<classname> handler, or the default."""
        func_name = '_process_{cls}'.format(cls=type(tlist).__name__)
        func = getattr(self, func_name.lower(), self._process_default)
        func(tlist)

    def _process_where(self, tlist):
        """Put WHERE on its own line and indent its conditions."""
        tidx, token = tlist.token_next_by(m=(T.Keyword, 'WHERE'))
        # issue121, errors in statement fixed??
        tlist.insert_before(tidx, self.nl())

        with indent(self):
            self._process_default(tlist)

    def _process_parenthesis(self, tlist):
        """Indent parenthesized sub-statements (e.g. sub-selects)."""
        ttypes = T.Keyword.DML, T.Keyword.DDL
        _, is_dml_dll = tlist.token_next_by(t=ttypes)
        fidx, first = tlist.token_next_by(m=sql.Parenthesis.M_OPEN)

        with indent(self, 1 if is_dml_dll else 0):
            tlist.tokens.insert(0, self.nl()) if is_dml_dll else None

            with offset(self, self._get_offset(first) + 1):
                self._process_default(tlist, not is_dml_dll)

    def _process_identifierlist(self, tlist):
        """Wrap identifier lists, one identifier per line past wrap_after."""
        identifiers = list(tlist.get_identifiers())
        first = next(identifiers.pop(0).flatten())
        num_offset = 1 if self.char == '\t' else self._get_offset(first)

        # Identifier lists inside function calls are left on one line.
        if not tlist.within(sql.Function):
            with offset(self, num_offset):
                position = 0
                for token in identifiers:
                    # Add 1 for the "," separator
                    position += len(token.value) + 1
                    if position > (self.wrap_after - self.offset):
                        adjust = 0
                        if self.comma_first:
                            adjust = -2
                            _, comma = tlist.token_prev(
                                tlist.token_index(token))
                            if comma is None:
                                continue
                            token = comma
                        tlist.insert_before(token, self.nl(offset=adjust))
                        if self.comma_first:
                            _, ws = tlist.token_next(
                                tlist.token_index(token), skip_ws=False)
                            if (ws is not None
                                    and ws.ttype is not T.Text.Whitespace):
                                tlist.insert_after(
                                    token, sql.Token(T.Whitespace, ' '))
                        position = 0

        self._process_default(tlist)

    def _process_case(self, tlist):
        """Align WHEN/THEN/ELSE branches of a CASE expression."""
        iterable = iter(tlist.get_cases())
        cond, _ = next(iterable)
        first = next(cond[0].flatten())

        with offset(self, self._get_offset(tlist[0])):
            with offset(self, self._get_offset(first)):
                for cond, value in iterable:
                    token = value[0] if cond is None else cond[0]
                    tlist.insert_before(token, self.nl())

                # Line breaks on group level are done. let's add an offset of
                # len "when ", "then ", "else "
                with offset(self, len("WHEN ")):
                    self._process_default(tlist)

            end_idx, end = tlist.token_next_by(m=sql.Case.M_CLOSE)
            tlist.insert_before(end_idx, self.nl())

    def _process_default(self, tlist, stmts=True):
        """Split keywords/statements, then recurse into sub-groups."""
        self._split_statements(tlist) if stmts else None
        self._split_kwds(tlist)
        [self._process(sgroup) for sgroup in tlist.get_sublists()]

    def process(self, stmt):
        """Entry point: reindent one statement, separating it from the last."""
        self._curr_stmt = stmt
        self._process(stmt)

        if self._last_stmt is not None:
            nl = '\n' if text_type(self._last_stmt).endswith('\n') else '\n\n'
            stmt.tokens.insert(0, sql.Token(T.Whitespace, nl))

        self._last_stmt = stmt
        return stmt
| gpl-3.0 |
ArcherSys/ArcherVMPeridot | nodechip/node_modules/cordova/node_modules/cordova-lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/__init__.py | 81 | 21295 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import gyp.input
import optparse
import os.path
import re
import shlex
import sys
import traceback
from gyp.common import GypError
# Default debug modes for GYP.  Maps mode name -> truthy value; populated
# from the -d/--debug command line flags (see gyp_main) and consulted by
# DebugOutput().
debug = {}

# List of "official" debug modes, but you can use anything you like.
DEBUG_GENERAL = 'general'
DEBUG_VARIABLES = 'variables'
DEBUG_INCLUDES = 'includes'
def DebugOutput(mode, message, *args):
  """Print *message* (%-formatted with *args*) when *mode* debugging is on.

  Output is prefixed with the mode and the caller's file/line/function so
  the origin of each debug line is visible.
  """
  if 'all' in gyp.debug or mode in gyp.debug:
    ctx = ('unknown', 0, 'unknown')
    try:
      f = traceback.extract_stack(limit=2)
      if f:
        ctx = f[0][:3]
    except Exception:
      # Fixed: was a bare `except:`, which also swallowed SystemExit and
      # KeyboardInterrupt.  Context extraction is best effort only.
      pass
    if args:
      message %= args
    # Fixed: parenthesized print works on both Python 2 and Python 3
    # (the original print statement is a syntax error on Python 3).
    print('%s:%s:%d:%s %s' % (mode.upper(), os.path.basename(ctx[0]),
                              ctx[1], ctx[2], message))
def FindBuildFiles():
  """Return the names of the .gyp files in the current working directory."""
  extension = '.gyp'
  # Fixed: the original loop variable `file` shadowed the Python 2 builtin;
  # a comprehension with a neutral name avoids that and is more idiomatic.
  return [name for name in os.listdir(os.getcwd())
          if name.endswith(extension)]
def Load(build_files, format, default_variables={},
         includes=[], depth='.', params=None, check=False,
         circular_check=True):
  """
  Loads one or more specified build files.
  default_variables and includes will be copied before use.
  Returns the generator for the specified format and the
  data returned by loading the specified build files.
  """
  # NOTE(review): the mutable default arguments ({} / []) are safe here only
  # because both are copied below (copy.copy / includes[:]) before use.
  if params is None:
    params = {}

  # 'format-flavor' selects a generator plus a flavor (e.g. 'make-android').
  if '-' in format:
    format, params['flavor'] = format.split('-', 1)

  default_variables = copy.copy(default_variables)

  # Default variables provided by this program and its modules should be
  # named WITH_CAPITAL_LETTERS to provide a distinct "best practice"
  # namespace, avoiding collisions with user and automatic variables.
  default_variables['GENERATOR'] = format

  # Format can be a custom python file, or by default the name of a module
  # within gyp.generator.
  if format.endswith('.py'):
    generator_name = os.path.splitext(format)[0]
    path, generator_name = os.path.split(generator_name)

    # Make sure the path to the custom generator is in sys.path
    # Don't worry about removing it once we are done.  Keeping the path
    # to each generator that is used in sys.path is likely harmless and
    # arguably a good idea.
    path = os.path.abspath(path)
    if path not in sys.path:
      sys.path.insert(0, path)
  else:
    generator_name = 'gyp.generator.' + format

  # These parameters are passed in order (as opposed to by key)
  # because ActivePython cannot handle key parameters to __import__.
  generator = __import__(generator_name, globals(), locals(), generator_name)
  # Seed defaults the generator declares without clobbering caller values.
  for (key, val) in generator.generator_default_variables.items():
    default_variables.setdefault(key, val)

  # Give the generator the opportunity to set additional variables based on
  # the params it will receive in the output phase.
  if getattr(generator, 'CalculateVariables', None):
    generator.CalculateVariables(default_variables, params)

  # Give the generator the opportunity to set generator_input_info based on
  # the params it will receive in the output phase.
  if getattr(generator, 'CalculateGeneratorInputInfo', None):
    generator.CalculateGeneratorInputInfo(params)

  # Fetch the generator specific info that gets fed to input, we use getattr
  # so we can default things and the generators only have to provide what
  # they need.
  generator_input_info = {
    'generator_wants_absolute_build_file_paths':
        getattr(generator, 'generator_wants_absolute_build_file_paths', False),
    'generator_handles_variants':
        getattr(generator, 'generator_handles_variants', False),
    'non_configuration_keys':
        getattr(generator, 'generator_additional_non_configuration_keys', []),
    'path_sections':
        getattr(generator, 'generator_additional_path_sections', []),
    'extra_sources_for_rules':
        getattr(generator, 'generator_extra_sources_for_rules', []),
    'generator_supports_multiple_toolsets':
        getattr(generator, 'generator_supports_multiple_toolsets', False),
    'generator_wants_static_library_dependencies_adjusted':
        getattr(generator,
                'generator_wants_static_library_dependencies_adjusted', True),
    'generator_wants_sorted_dependencies':
        getattr(generator, 'generator_wants_sorted_dependencies', False),
  }

  # Process the input specific to this generator.
  result = gyp.input.Load(build_files, default_variables, includes[:],
                          depth, generator_input_info, check, circular_check,
                          params['parallel'])
  return [generator] + result
def NameValueListToDict(name_value_list):
  """
  Takes an array of strings of the form 'NAME=VALUE' and creates a dictionary
  of the pairs.  If a string is simply NAME, then the value in the dictionary
  is set to True.  If VALUE can be converted to an integer, it is.
  """
  result = {}
  for item in name_value_list:
    name, sep, value = item.partition('=')
    if not sep:
      # No '=' present: treat the bare NAME as a boolean flag.
      result[name] = True
      continue
    # If we can make it an int, use that, otherwise, use the string.
    try:
      result[name] = int(value)
    except ValueError:
      result[name] = value
  return result
def ShlexEnv(env_name):
  """Split the named environment variable like a shell command line.

  Returns the unsplit falsy value ([] when unset, '' when set but empty),
  matching the historical behavior.
  """
  raw = os.environ.get(env_name, [])
  if not raw:
    return raw
  return shlex.split(raw)
def FormatOpt(opt, value):
  """Rejoin a flag and its value as they would appear on a command line."""
  is_long = opt.startswith('--')
  # Long options use '--opt=value'; short options are juxtaposed ('-Dvalue').
  return '%s=%s' % (opt, value) if is_long else opt + value
def RegenerateAppendFlag(flag, values, predicate, env_name, options):
  """Regenerate a list of command line flags, for an option of action='append'.

  The |env_name|, if given, is checked in the environment and used to generate
  an initial list of options, then the options that were specified on the
  command line (given in |values|) are appended.  This matches the handling of
  environment variables and command line flags where command line flags
  override the environment, while not requiring the environment to be set when
  the flags are used again.
  """
  flags = []
  if options.use_environment and env_name:
    for flag_value in ShlexEnv(env_name):
      value = FormatOpt(flag, predicate(flag_value))
      # De-duplicate while keeping only the last occurrence.
      if value in flags:
        flags.remove(value)
      flags.append(value)
  # Command line values come after (and therefore override) the environment.
  flags.extend(FormatOpt(flag, predicate(v)) for v in values or [])
  return flags
def RegenerateFlags(options):
  """Given a parsed options object, and taking the environment variables into
  account, returns a list of flags that should regenerate an equivalent options
  object (even in the absence of the environment variables.)

  Any path options will be normalized relative to depth.

  The format flag is not included, as it is assumed the calling generator will
  set that as appropriate.
  """
  def FixPath(path):
    # Normalize a path value relative to the --depth directory.
    path = gyp.common.FixIfRelativePath(path, options.depth)
    if not path:
      return os.path.curdir
    return path

  def Noop(value):
    return value

  # We always want to ignore the environment when regenerating, to avoid
  # duplicate or changed flags in the environment at the time of regeneration.
  flags = ['--ignore-environment']
  # Fixed: items() instead of Python-2-only iteritems(), and the Python-2-only
  # `print >>sys.stderr` statements below replaced by sys.stderr.write calls,
  # so this function parses and runs on both Python 2 and 3.
  for name, metadata in options._regeneration_metadata.items():
    opt = metadata['opt']
    value = getattr(options, name)
    value_predicate = metadata['type'] == 'path' and FixPath or Noop
    action = metadata['action']
    env_name = metadata['env_name']
    if action == 'append':
      flags.extend(RegenerateAppendFlag(opt, value, value_predicate,
                                        env_name, options))
    elif action in ('store', None):  # None is a synonym for 'store'.
      if value:
        flags.append(FormatOpt(opt, value_predicate(value)))
      elif options.use_environment and env_name and os.environ.get(env_name):
        flags.append(FormatOpt(opt, value_predicate(os.environ.get(env_name))))
    elif action in ('store_true', 'store_false'):
      if ((action == 'store_true' and value) or
          (action == 'store_false' and not value)):
        flags.append(opt)
      elif options.use_environment and env_name:
        sys.stderr.write('Warning: environment regeneration unimplemented '
                         'for %s flag %r env_name %r\n' % (action, opt,
                                                           env_name))
    else:
      sys.stderr.write('Warning: regeneration unimplemented for action %r '
                       'flag %r\n' % (action, opt))

  return flags
class RegeneratableOptionParser(optparse.OptionParser):
  """An OptionParser that records the metadata needed to regenerate flags.

  Options may carry extra keyword arguments (regenerate, env_name, and the
  pseudo-type 'path') which are stripped before being handed to optparse
  and stashed on the parsed values as `_regeneration_metadata`.
  """

  def __init__(self):
    self.__regeneratable_options = {}
    optparse.OptionParser.__init__(self)

  def add_option(self, *args, **kw):
    """Add an option to the parser.

    This accepts the same arguments as OptionParser.add_option, plus the
    following:
      regenerate: can be set to False to prevent this option from being
          included in regeneration.
      env_name: name of environment variable that additional values for
          this option come from.
      type: adds type='path', to tell the regenerator that the values of
          this option need to be made relative to options.depth
    """
    env_name = kw.pop('env_name', None)
    if 'dest' in kw and kw.pop('regenerate', True):
      dest = kw['dest']
      # The 'path' pseudo-type matters only for regeneration; optparse
      # itself can treat the value as a plain string.
      opt_type = kw.get('type')
      if opt_type == 'path':
        kw['type'] = 'string'
      self.__regeneratable_options[dest] = {
          'action': kw.get('action'),
          'type': opt_type,
          'env_name': env_name,
          'opt': args[0],
      }
    optparse.OptionParser.add_option(self, *args, **kw)

  def parse_args(self, *args):
    values, args = optparse.OptionParser.parse_args(self, *args)
    values._regeneration_metadata = self.__regeneratable_options
    return values, args
def gyp_main(args):
my_name = os.path.basename(sys.argv[0])
parser = RegeneratableOptionParser()
usage = 'usage: %s [options ...] [build_file ...]'
parser.set_usage(usage.replace('%s', '%prog'))
parser.add_option('-D', dest='defines', action='append', metavar='VAR=VAL',
env_name='GYP_DEFINES',
help='sets variable VAR to value VAL')
parser.add_option('-f', '--format', dest='formats', action='append',
env_name='GYP_GENERATORS', regenerate=False,
help='output formats to generate')
parser.add_option('--msvs-version', dest='msvs_version',
regenerate=False,
help='Deprecated; use -G msvs_version=MSVS_VERSION instead')
parser.add_option('-I', '--include', dest='includes', action='append',
metavar='INCLUDE', type='path',
help='files to include in all loaded .gyp files')
parser.add_option('--depth', dest='depth', metavar='PATH', type='path',
help='set DEPTH gyp variable to a relative path to PATH')
parser.add_option('-d', '--debug', dest='debug', metavar='DEBUGMODE',
action='append', default=[], help='turn on a debugging '
'mode for debugging GYP. Supported modes are "variables", '
'"includes" and "general" or "all" for all of them.')
parser.add_option('-S', '--suffix', dest='suffix', default='',
help='suffix to add to generated files')
parser.add_option('-G', dest='generator_flags', action='append', default=[],
metavar='FLAG=VAL', env_name='GYP_GENERATOR_FLAGS',
help='sets generator flag FLAG to VAL')
parser.add_option('--generator-output', dest='generator_output',
action='store', default=None, metavar='DIR', type='path',
env_name='GYP_GENERATOR_OUTPUT',
help='puts generated build files under DIR')
parser.add_option('--ignore-environment', dest='use_environment',
action='store_false', default=True, regenerate=False,
help='do not read options from environment variables')
parser.add_option('--check', dest='check', action='store_true',
help='check format of gyp files')
parser.add_option('--parallel', action='store_true',
env_name='GYP_PARALLEL',
help='Use multiprocessing for speed (experimental)')
parser.add_option('--toplevel-dir', dest='toplevel_dir', action='store',
default=None, metavar='DIR', type='path',
help='directory to use as the root of the source tree')
parser.add_option('--build', dest='configs', action='append',
help='configuration for build after project generation')
# --no-circular-check disables the check for circular relationships between
# .gyp files. These relationships should not exist, but they've only been
# observed to be harmful with the Xcode generator. Chromium's .gyp files
# currently have some circular relationships on non-Mac platforms, so this
# option allows the strict behavior to be used on Macs and the lenient
# behavior to be used elsewhere.
# TODO(mark): Remove this option when http://crbug.com/35878 is fixed.
parser.add_option('--no-circular-check', dest='circular_check',
action='store_false', default=True, regenerate=False,
help="don't check for circular relationships between files")
# We read a few things from ~/.gyp, so set up a var for that.
home_vars = ['HOME']
if sys.platform in ('cygwin', 'win32'):
home_vars.append('USERPROFILE')
home = None
home_dot_gyp = None
for home_var in home_vars:
home = os.getenv(home_var)
if home != None:
home_dot_gyp = os.path.join(home, '.gyp')
if not os.path.exists(home_dot_gyp):
home_dot_gyp = None
else:
break
# TODO(thomasvl): add support for ~/.gyp/defaults
options, build_files_arg = parser.parse_args(args)
build_files = build_files_arg
if not options.formats:
# If no format was given on the command line, then check the env variable.
generate_formats = []
if options.use_environment:
generate_formats = os.environ.get('GYP_GENERATORS', [])
if generate_formats:
generate_formats = re.split('[\s,]', generate_formats)
if generate_formats:
options.formats = generate_formats
else:
# Nothing in the variable, default based on platform.
if sys.platform == 'darwin':
options.formats = ['xcode']
elif sys.platform in ('win32', 'cygwin'):
options.formats = ['msvs']
else:
options.formats = ['make']
if not options.generator_output and options.use_environment:
g_o = os.environ.get('GYP_GENERATOR_OUTPUT')
if g_o:
options.generator_output = g_o
if not options.parallel and options.use_environment:
p = os.environ.get('GYP_PARALLEL')
options.parallel = bool(p and p != '0')
for mode in options.debug:
gyp.debug[mode] = 1
# Do an extra check to avoid work when we're not debugging.
if DEBUG_GENERAL in gyp.debug:
DebugOutput(DEBUG_GENERAL, 'running with these options:')
for option, value in sorted(options.__dict__.items()):
if option[0] == '_':
continue
if isinstance(value, basestring):
DebugOutput(DEBUG_GENERAL, " %s: '%s'", option, value)
else:
DebugOutput(DEBUG_GENERAL, " %s: %s", option, value)
if not build_files:
build_files = FindBuildFiles()
if not build_files:
raise GypError((usage + '\n\n%s: error: no build_file') %
(my_name, my_name))
# TODO(mark): Chromium-specific hack!
# For Chromium, the gyp "depth" variable should always be a relative path
# to Chromium's top-level "src" directory. If no depth variable was set
# on the command line, try to find a "src" directory by looking at the
# absolute path to each build file's directory. The first "src" component
# found will be treated as though it were the path used for --depth.
if not options.depth:
for build_file in build_files:
build_file_dir = os.path.abspath(os.path.dirname(build_file))
build_file_dir_components = build_file_dir.split(os.path.sep)
components_len = len(build_file_dir_components)
for index in xrange(components_len - 1, -1, -1):
if build_file_dir_components[index] == 'src':
options.depth = os.path.sep.join(build_file_dir_components)
break
del build_file_dir_components[index]
# If the inner loop found something, break without advancing to another
# build file.
if options.depth:
break
if not options.depth:
raise GypError('Could not automatically locate src directory. This is'
'a temporary Chromium feature that will be removed. Use'
'--depth as a workaround.')
# If toplevel-dir is not set, we assume that depth is the root of our source
# tree.
if not options.toplevel_dir:
options.toplevel_dir = options.depth
# -D on the command line sets variable defaults - D isn't just for define,
# it's for default. Perhaps there should be a way to force (-F?) a
# variable's value so that it can't be overridden by anything else.
cmdline_default_variables = {}
defines = []
if options.use_environment:
defines += ShlexEnv('GYP_DEFINES')
if options.defines:
defines += options.defines
cmdline_default_variables = NameValueListToDict(defines)
if DEBUG_GENERAL in gyp.debug:
DebugOutput(DEBUG_GENERAL,
"cmdline_default_variables: %s", cmdline_default_variables)
# Set up includes.
includes = []
# If ~/.gyp/include.gypi exists, it'll be forcibly included into every
# .gyp file that's loaded, before anything else is included.
if home_dot_gyp != None:
default_include = os.path.join(home_dot_gyp, 'include.gypi')
if os.path.exists(default_include):
print 'Using overrides found in ' + default_include
includes.append(default_include)
# Command-line --include files come after the default include.
if options.includes:
includes.extend(options.includes)
# Generator flags should be prefixed with the target generator since they
# are global across all generator runs.
gen_flags = []
if options.use_environment:
gen_flags += ShlexEnv('GYP_GENERATOR_FLAGS')
if options.generator_flags:
gen_flags += options.generator_flags
generator_flags = NameValueListToDict(gen_flags)
if DEBUG_GENERAL in gyp.debug.keys():
DebugOutput(DEBUG_GENERAL, "generator_flags: %s", generator_flags)
# TODO: Remove this and the option after we've gotten folks to move to the
# generator flag.
if options.msvs_version:
print >>sys.stderr, \
'DEPRECATED: Use generator flag (-G msvs_version=' + \
options.msvs_version + ') instead of --msvs-version=' + \
options.msvs_version
generator_flags['msvs_version'] = options.msvs_version
# Generate all requested formats (use a set in case we got one format request
# twice)
for format in set(options.formats):
params = {'options': options,
'build_files': build_files,
'generator_flags': generator_flags,
'cwd': os.getcwd(),
'build_files_arg': build_files_arg,
'gyp_binary': sys.argv[0],
'home_dot_gyp': home_dot_gyp,
'parallel': options.parallel}
# Start with the default variables from the command line.
[generator, flat_list, targets, data] = Load(build_files, format,
cmdline_default_variables,
includes, options.depth,
params, options.check,
options.circular_check)
# TODO(mark): Pass |data| for now because the generator needs a list of
# build files that came in. In the future, maybe it should just accept
# a list, and not the whole data dict.
# NOTE: flat_list is the flattened dependency graph specifying the order
# that targets may be built. Build systems that operate serially or that
# need to have dependencies defined before dependents reference them should
# generate targets in the order specified in flat_list.
generator.GenerateOutput(flat_list, targets, data, params)
if options.configs:
valid_configs = targets[flat_list[0]]['configurations'].keys()
for conf in options.configs:
if conf not in valid_configs:
raise GypError('Invalid config specified via --build: %s' % conf)
generator.PerformBuild(data, options.configs, params)
# Done
return 0
def main(args):
    """Command-line entry point.

    Runs gyp_main and converts an expected GypError into a message on
    stderr plus a non-zero exit status.  (Python 2 `except X, e` syntax.)
    """
    try:
        return gyp_main(args)
    except GypError, e:
        sys.stderr.write("gyp: %s\n" % e)
        return 1
# Script entry point: forward the CLI arguments (minus the program name)
# and exit with main()'s return code.
if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
| mit |
shevchenco123/28-Mansions | navigation/base_local_planner/src/local_planner_limits/__init__.py | 6 | 2807 | # Generic set of parameters to use with base local planners
# To use:
#
# from local_planner_limits import add_generic_localplanner_params
# gen = ParameterGenerator()
# add_generic_localplanner_params(gen)
# ...
#
# Using these standard parameters instead of your own allows easier switching of local planners
# need this only for dataype declarations
from dynamic_reconfigure.parameter_generator_catkin import double_t, bool_t
def add_generic_localplanner_params(gen):
    """Register the generic local-planner limit parameters on *gen*.

    Each row of the table is (name, type, level, description,
    default[, min[, max]]) and is forwarded positionally to gen.add(),
    so the declared parameters and their registration order are exactly
    those of the original call sequence.
    """
    limit_params = [
        # velocities
        ("max_trans_vel", double_t, 0, "The absolute value of the maximum translational velocity for the robot in m/s", 0.55, 0),
        ("min_trans_vel", double_t, 0, "The absolute value of the minimum translational velocity for the robot in m/s", 0.1, 0),
        ("max_vel_x", double_t, 0, "The maximum x velocity for the robot in m/s", 0.55),
        ("min_vel_x", double_t, 0, "The minimum x velocity for the robot in m/s", 0.0),
        ("max_vel_y", double_t, 0, "The maximum y velocity for the robot in m/s", 0.1),
        ("min_vel_y", double_t, 0, "The minimum y velocity for the robot in m/s", -0.1),
        ("max_rot_vel", double_t, 0, "The absolute value of the maximum rotational velocity for the robot in rad/s", 1.0, 0),
        ("min_rot_vel", double_t, 0, "The absolute value of the minimum rotational velocity for the robot in rad/s", 0.4, 0),
        # acceleration
        ("acc_lim_x", double_t, 0, "The acceleration limit of the robot in the x direction", 2.5, 0, 20.0),
        ("acc_lim_y", double_t, 0, "The acceleration limit of the robot in the y direction", 2.5, 0, 20.0),
        ("acc_lim_theta", double_t, 0, "The acceleration limit of the robot in the theta direction", 3.2, 0, 20.0),
        ("acc_limit_trans", double_t, 0, "The absolute value of the maximum translational acceleration for the robot in m/s^2", 0.1, 0),
        # # jerk
        # ("jerk_lim_trans", double_t, 0, "The absolute value of the maximum translational jerk for the robot in m/s^3", 0.1, 0),
        # ("jerk_lim_rot", double_t, 0, "The absolute value of the maximum rotational jerk for the robot in m/s^3", 0.1, 0),
        # plan handling, goal tolerances and "stopped" thresholds
        ("prune_plan", bool_t, 0, "Start following closest point of global plan, not first point (if different).", False),
        ("xy_goal_tolerance", double_t, 0, "Within what maximum distance we consider the robot to be in goal", 0.1),
        ("yaw_goal_tolerance", double_t, 0, "Within what maximum angle difference we consider the robot to face goal direction", 0.1),
        ("trans_stopped_vel", double_t, 0, "Below what maximum velocity we consider the robot to be stopped in translation", 0.1),
        ("rot_stopped_vel", double_t, 0, "Below what maximum rotation velocity we consider the robot to be stopped in rotation", 0.1),
    ]
    for row in limit_params:
        gen.add(*row)
| apache-2.0 |
fedora-infra/fedora-packages | fedoracommunity/connectors/bugzillahacks.py | 2 | 4419 | """ Crazy hacks to make bugzilla work without throwing SSL timeouts. """
import sys
import urllib2
import xmlrpclib
# If we're not on python2.7, we assume we're on python2.6.
# Selects which hot patch hotpatch_bugzilla() applies: on 2.7 we patch
# python-bugzilla's transport, on 2.6 we patch the stdlib's httplib.
PY27 = (
    sys.version_info[0] == 2 and
    sys.version_info[1] == 7
)
def hotpatch_bugzilla():
    """ Hotpatch or "duck-punch" bugzilla.base.SafeCookieTransport
    to use a longer timeout with xmlrpclib.  When we ask for "all
    the bugs on the kernel ever", it can take a long time.

    Which patch gets applied depends on the python-bugzilla version
    (0.7.0, 0.8.0, or 0.9.0+) and on whether we run python 2.7 or 2.6.
    """
    import httplib
    import bugzilla.base

    # This is in seconds.
    longer_timeout = 300

    if PY27:
        if bugzilla.version == '0.7.0':
            # In the case of python2.7 we apply a hot patch to
            # python-bugzilla's
            # SafeCookieTransport and have it pass in an SSL timeout
            # to xmlrpclib.
            def patched_make_connection(self, host):
                # Reuse the cached connection for the same host, if any.
                if self._connection and host == self._connection[0]:
                    return self._connection[1]
                # create a HTTPS connection object from a host descriptor
                # host may be a string, or a (host, x509-dict) tuple
                try:
                    HTTPS = httplib.HTTPSConnection
                except AttributeError:
                    raise NotImplementedError(
                        "your version of httplib doesn't support HTTPS"
                    )
                else:
                    chost, self._extra_headers, x509 = self.get_host_info(host)
                    # Passing timeout= here is the whole point of the patch.
                    self._connection = host, HTTPS(
                        chost,
                        None,
                        timeout=longer_timeout,
                        **(x509 or {})
                    )
                    return self._connection[1]

            bugzilla.base.SafeCookieTransport.make_connection = \
                patched_make_connection
        elif bugzilla.version == '0.8.0':
            # In bugzilla-0.8.0 this transport class got renamed and rewritten.
            def patched_request(self, host, handler, request_body, verbose=0):
                req = urllib2.Request(self.uri)
                req.add_header('User-Agent', self.user_agent)
                req.add_header('Content-Type', 'text/xml')
                if hasattr(self, 'accept_gzip_encoding') and \
                        self.accept_gzip_encoding:
                    req.add_header('Accept-Encoding', 'gzip')
                req.add_data(request_body)
                # Passing timeout= here is the reason for the patch.
                resp = self.opener.open(req, timeout=longer_timeout)
                # In Python 2, resp is a urllib.addinfourl instance,
                # which does not
                # have the getheader method that parse_response expects.
                if not hasattr(resp, 'getheader'):
                    resp.getheader = resp.headers.getheader
                if resp.code == 200:
                    self.verbose = verbose
                    return self.parse_response(resp)
                resp.close()
                # NOTE(review): addinfourl exposes .code, not .status/.reason;
                # this error path looks like it relies on upstream attributes
                # that may not exist here -- confirm against python-bugzilla.
                raise xmlrpclib.ProtocolError(self.uri, resp.status,
                                              resp.reason, resp.msg)

            bugzilla.base._CookieTransport.request = patched_request
        else:
            # In python-bugzilla-0.9.0 we don't have to do anything, because a
            # timeout is set by default in bugzilla.base._CURLTransport.
            pass
    else:
        # In the case of python2.6, we have to do something different and apply
        # a hot patch to the stdlib's httplib since xmlrpclib is written
        # against an ancient backwards compatible version of httplib.
        # (python-2.6's xmlrpclib is compatible with python-1.5's httplib.
        # Crazy!)
        def __init__(self, host='', port=None, key_file=None, cert_file=None,
                     strict=None):
            # provide a default host, pass the X509 cert info

            # urf. compensate for bad input.
            if port == 0:
                port = None
            self._setup(self._connection_class(host, port, key_file,
                                               cert_file, strict,
                                               timeout=longer_timeout))

            # we never actually use these for anything, but we keep them
            # here for compatibility with post-1.5.2 CVS.
            self.key_file = key_file
            self.cert_file = cert_file

        httplib.HTTPS.__init__ = __init__
| agpl-3.0 |
leighpauls/k2cro4 | third_party/webpagereplay/third_party/dns/resolver.py | 215 | 28920 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS stub resolver.
@var default_resolver: The default resolver object
@type default_resolver: dns.resolver.Resolver object"""
import socket
import sys
import time
import dns.exception
import dns.message
import dns.name
import dns.query
import dns.rcode
import dns.rdataclass
import dns.rdatatype
# _winreg is only needed (and only available) on Windows, where resolver
# configuration is read from the registry instead of /etc/resolv.conf.
if sys.platform == 'win32':
    import _winreg
class NXDOMAIN(dns.exception.DNSException):
    """Raised when the queried name does not exist (NXDOMAIN rcode)."""
# The definition of the Timeout exception has moved from here to the
# dns.exception module.  We keep dns.resolver.Timeout defined for
# backwards compatibility.
# (It is an alias, not a subclass: catching either name catches the
# same exception object.)
Timeout = dns.exception.Timeout
class NoAnswer(dns.exception.DNSException):
    """Raised when the response contains no answer to the question."""
class NoNameservers(dns.exception.DNSException):
    """Raised when no non-broken nameserver is left to answer the query."""
class NotAbsolute(dns.exception.DNSException):
    """Raised when an absolute (fully-qualified) domain name is required
    but a relative name was supplied."""
class NoRootSOA(dns.exception.DNSException):
    """Raised when no SOA record is found anywhere, even at the root
    name.  This should never happen!"""
class Answer(object):
    """DNS stub resolver answer

    Instances of this class bundle up the result of a successful DNS
    resolution.

    For convenience, the answer object implements much of the sequence
    protocol, forwarding to its rrset.  E.g. "for a in answer" is
    equivalent to "for a in answer.rrset", "answer[i]" is equivalent
    to "answer.rrset[i]", and "answer[i:j]" is equivalent to
    "answer.rrset[i:j]".

    Note that CNAMEs or DNAMEs in the response may mean that answer
    node's name might not be the query name.

    @ivar qname: The query name
    @type qname: dns.name.Name object
    @ivar rdtype: The query type
    @type rdtype: int
    @ivar rdclass: The query class
    @type rdclass: int
    @ivar response: The response message
    @type response: dns.message.Message object
    @ivar rrset: The answer
    @type rrset: dns.rrset.RRset object
    @ivar expiration: The time when the answer expires
    @type expiration: float (seconds since the epoch)
    """
    def __init__(self, qname, rdtype, rdclass, response):
        """Extract the answer rrset from *response*, chasing CNAMEs.

        Raises NoAnswer when neither a matching rrset nor a CNAME chain
        leading to one is present in the answer section.
        """
        self.qname = qname
        self.rdtype = rdtype
        self.rdclass = rdclass
        self.response = response
        min_ttl = -1
        rrset = None
        # Follow at most 15 CNAME indirections; the bound guards
        # against looping or absurdly long chains.
        for count in xrange(0, 15):
            try:
                rrset = response.find_rrset(response.answer, qname,
                                            rdclass, rdtype)
                if min_ttl == -1 or rrset.ttl < min_ttl:
                    min_ttl = rrset.ttl
                break
            except KeyError:
                # No direct answer for qname; look for a CNAME and retry
                # with its target.  The answer's TTL is the minimum TTL
                # seen anywhere along the chain.
                if rdtype != dns.rdatatype.CNAME:
                    try:
                        crrset = response.find_rrset(response.answer,
                                                     qname,
                                                     rdclass,
                                                     dns.rdatatype.CNAME)
                        if min_ttl == -1 or crrset.ttl < min_ttl:
                            min_ttl = crrset.ttl
                        for rd in crrset:
                            qname = rd.target
                            break
                        continue
                    except KeyError:
                        raise NoAnswer
                raise NoAnswer
        if rrset is None:
            raise NoAnswer
        self.rrset = rrset
        self.expiration = time.time() + min_ttl

    def __getattr__(self, attr):
        # Forward selected attributes to the underlying rrset.  (Only
        # consulted for names not already set on the instance.)
        if attr == 'name':
            return self.rrset.name
        elif attr == 'ttl':
            return self.rrset.ttl
        elif attr == 'covers':
            return self.rrset.covers
        elif attr == 'rdclass':
            return self.rrset.rdclass
        elif attr == 'rdtype':
            return self.rrset.rdtype
        else:
            raise AttributeError(attr)

    # Sequence protocol: delegate to the rrset.
    def __len__(self):
        return len(self.rrset)

    def __iter__(self):
        return iter(self.rrset)

    def __getitem__(self, i):
        return self.rrset[i]

    def __delitem__(self, i):
        del self.rrset[i]

    def __getslice__(self, i, j):
        return self.rrset[i:j]

    def __delslice__(self, i, j):
        del self.rrset[i:j]
class Cache(object):
    """Simple DNS answer cache.

    Entries are keyed by (qname, rdtype, rdclass) tuples and must expose
    an ``expiration`` attribute (seconds since the epoch), as
    dns.resolver.Answer objects do.

    @ivar data: A dictionary of cached data
    @type data: dict
    @ivar cleaning_interval: The number of seconds between cleanings.  The
    default is 300 (5 minutes).
    @type cleaning_interval: float
    @ivar next_cleaning: The time the cache should next be cleaned (in seconds
    since the epoch.)
    @type next_cleaning: float
    """

    def __init__(self, cleaning_interval=300.0):
        """Initialize a DNS cache.

        @param cleaning_interval: the number of seconds between periodic
        cleanings.  The default is 300.0
        @type cleaning_interval: float.
        """
        self.data = {}
        self.cleaning_interval = cleaning_interval
        self.next_cleaning = time.time() + self.cleaning_interval

    def maybe_clean(self):
        """Clean the cache if it's time to do so."""
        now = time.time()
        if self.next_cleaning <= now:
            # Collect expired keys first: a dict must not be mutated
            # while it is being iterated.  (items() instead of the
            # Python-2-only iteritems() -- identical behavior, and also
            # valid on Python 3.)
            keys_to_delete = [k for (k, v) in self.data.items()
                              if v.expiration <= now]
            for k in keys_to_delete:
                del self.data[k]
            now = time.time()
            self.next_cleaning = now + self.cleaning_interval

    def get(self, key):
        """Get the answer associated with I{key}.  Returns None if
        no answer is cached for the key.
        @param key: the key
        @type key: (dns.name.Name, int, int) tuple whose values are the
        query name, rdtype, and rdclass.
        @rtype: dns.resolver.Answer object or None
        """
        self.maybe_clean()
        v = self.data.get(key)
        # Re-check expiration: the entry may have expired since the
        # last periodic cleaning.
        if v is None or v.expiration <= time.time():
            return None
        return v

    def put(self, key, value):
        """Associate key and value in the cache.
        @param key: the key
        @type key: (dns.name.Name, int, int) tuple whose values are the
        query name, rdtype, and rdclass.
        @param value: The answer being cached
        @type value: dns.resolver.Answer object
        """
        self.maybe_clean()
        self.data[key] = value

    def flush(self, key=None):
        """Flush the cache.

        If I{key} is specified, only that item is flushed.  Otherwise
        the entire cache is flushed.

        @param key: the key to flush
        @type key: (dns.name.Name, int, int) tuple or None
        """
        if key is not None:
            # 'in' instead of the long-deprecated dict.has_key(); same
            # semantics on Python 2, and has_key() no longer exists on 3.
            if key in self.data:
                del self.data[key]
        else:
            self.data = {}
            self.next_cleaning = time.time() + self.cleaning_interval
class Resolver(object):
"""DNS stub resolver
@ivar domain: The domain of this host
@type domain: dns.name.Name object
@ivar nameservers: A list of nameservers to query. Each nameserver is
a string which contains the IP address of a nameserver.
@type nameservers: list of strings
@ivar search: The search list. If the query name is a relative name,
the resolver will construct an absolute query name by appending the search
names one by one to the query name.
@type search: list of dns.name.Name objects
@ivar port: The port to which to send queries. The default is 53.
@type port: int
@ivar timeout: The number of seconds to wait for a response from a
server, before timing out.
@type timeout: float
@ivar lifetime: The total number of seconds to spend trying to get an
answer to the question. If the lifetime expires, a Timeout exception
will occur.
@type lifetime: float
@ivar keyring: The TSIG keyring to use. The default is None.
@type keyring: dict
@ivar keyname: The TSIG keyname to use. The default is None.
@type keyname: dns.name.Name object
@ivar keyalgorithm: The TSIG key algorithm to use. The default is
dns.tsig.default_algorithm.
@type keyalgorithm: string
@ivar edns: The EDNS level to use. The default is -1, no Edns.
@type edns: int
@ivar ednsflags: The EDNS flags
@type ednsflags: int
@ivar payload: The EDNS payload size. The default is 0.
@type payload: int
@ivar cache: The cache to use. The default is None.
@type cache: dns.resolver.Cache object
"""
    def __init__(self, filename='/etc/resolv.conf', configure=True):
        """Initialize a resolver instance.

        @param filename: The filename of a configuration file in
        standard /etc/resolv.conf format.  This parameter is meaningful
        only when I{configure} is true and the platform is POSIX.
        @type filename: string or file object
        @param configure: If True (the default), the resolver instance
        is configured in the normal fashion for the operating system
        the resolver is running on.  (I.e. a /etc/resolv.conf file on
        POSIX systems and from the registry on Windows systems.)
        @type configure: bool"""
        self.reset()
        if configure:
            if sys.platform == 'win32':
                self.read_registry()
            elif filename:
                self.read_resolv_conf(filename)
    def reset(self):
        """Reset all resolver configuration to the defaults."""
        # Guess the local domain from the hostname: everything after the
        # first label, or the root when the hostname is a single label.
        self.domain = \
            dns.name.Name(dns.name.from_text(socket.gethostname())[1:])
        if len(self.domain) == 0:
            self.domain = dns.name.root
        self.nameservers = []
        self.search = []
        self.port = 53                # standard DNS port
        self.timeout = 2.0            # per-attempt timeout, seconds
        self.lifetime = 30.0          # total budget for one query() call
        self.keyring = None           # TSIG settings; see use_tsig()
        self.keyname = None
        self.keyalgorithm = dns.tsig.default_algorithm
        self.edns = -1                # EDNS disabled; see use_edns()
        self.ednsflags = 0
        self.payload = 0
        self.cache = None
    def read_resolv_conf(self, f):
        """Process f as a file in the /etc/resolv.conf format.  If f is
        a string, it is used as the name of the file to open; otherwise it
        is treated as the file itself.

        Recognizes 'nameserver', 'domain' and 'search' lines; other lines
        (including '#' and ';' comments) are ignored.  Falls back to
        127.0.0.1 when no nameserver can be determined.
        """
        if isinstance(f, str) or isinstance(f, unicode):
            try:
                f = open(f, 'r')
            except IOError:
                # /etc/resolv.conf doesn't exist, can't be read, etc.
                # We'll just use the default resolver configuration.
                self.nameservers = ['127.0.0.1']
                return
            want_close = True
        else:
            # Caller passed an open file object; don't close what we
            # didn't open.
            want_close = False
        try:
            for l in f:
                if len(l) == 0 or l[0] == '#' or l[0] == ';':
                    continue
                tokens = l.split()
                if len(tokens) == 0:
                    continue
                if tokens[0] == 'nameserver':
                    self.nameservers.append(tokens[1])
                elif tokens[0] == 'domain':
                    self.domain = dns.name.from_text(tokens[1])
                elif tokens[0] == 'search':
                    for suffix in tokens[1:]:
                        self.search.append(dns.name.from_text(suffix))
        finally:
            if want_close:
                f.close()
        if len(self.nameservers) == 0:
            self.nameservers.append('127.0.0.1')
def _determine_split_char(self, entry):
#
# The windows registry irritatingly changes the list element
# delimiter in between ' ' and ',' (and vice-versa) in various
# versions of windows.
#
if entry.find(' ') >= 0:
split_char = ' '
elif entry.find(',') >= 0:
split_char = ','
else:
# probably a singleton; treat as a space-separated list.
split_char = ' '
return split_char
def _config_win32_nameservers(self, nameservers):
"""Configure a NameServer registry entry."""
# we call str() on nameservers to convert it from unicode to ascii
nameservers = str(nameservers)
split_char = self._determine_split_char(nameservers)
ns_list = nameservers.split(split_char)
for ns in ns_list:
if not ns in self.nameservers:
self.nameservers.append(ns)
    def _config_win32_domain(self, domain):
        """Configure a Domain registry entry."""
        # we call str() on domain to convert it from unicode to ascii
        self.domain = dns.name.from_text(str(domain))
def _config_win32_search(self, search):
"""Configure a Search registry entry."""
# we call str() on search to convert it from unicode to ascii
search = str(search)
split_char = self._determine_split_char(search)
search_list = search.split(split_char)
for s in search_list:
if not s in self.search:
self.search.append(dns.name.from_text(s))
    def _config_win32_fromkey(self, key):
        """Extract DNS info from a registry key.

        Prefers statically-configured values ('NameServer'/'Domain') and
        falls back to the DHCP-supplied ones ('DhcpNameServer'/'DhcpDomain')
        only when no static nameserver is set.
        """
        try:
            servers, rtype = _winreg.QueryValueEx(key, 'NameServer')
        except WindowsError:
            servers = None
        if servers:
            self._config_win32_nameservers(servers)
            try:
                dom, rtype = _winreg.QueryValueEx(key, 'Domain')
                if dom:
                    self._config_win32_domain(dom)
            except WindowsError:
                # Value absent; nothing to configure.
                pass
        else:
            try:
                servers, rtype = _winreg.QueryValueEx(key, 'DhcpNameServer')
            except WindowsError:
                servers = None
            if servers:
                self._config_win32_nameservers(servers)
                try:
                    dom, rtype = _winreg.QueryValueEx(key, 'DhcpDomain')
                    if dom:
                        self._config_win32_domain(dom)
                except WindowsError:
                    pass
        try:
            search, rtype = _winreg.QueryValueEx(key, 'SearchList')
        except WindowsError:
            search = None
        if search:
            self._config_win32_search(search)
    def read_registry(self):
        """Extract resolver configuration from the Windows registry.

        Reads the global Tcpip parameters first, then (on NT-family
        systems) scans the per-interface keys of every enabled NIC.
        """
        lm = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
        want_scan = False
        try:
            try:
                # XP, 2000
                tcp_params = _winreg.OpenKey(lm,
                                             r'SYSTEM\CurrentControlSet'
                                             r'\Services\Tcpip\Parameters')
                want_scan = True
            except EnvironmentError:
                # ME
                tcp_params = _winreg.OpenKey(lm,
                                             r'SYSTEM\CurrentControlSet'
                                             r'\Services\VxD\MSTCP')
            try:
                self._config_win32_fromkey(tcp_params)
            finally:
                tcp_params.Close()
            if want_scan:
                interfaces = _winreg.OpenKey(lm,
                                             r'SYSTEM\CurrentControlSet'
                                             r'\Services\Tcpip\Parameters'
                                             r'\Interfaces')
                try:
                    i = 0
                    while True:
                        try:
                            guid = _winreg.EnumKey(interfaces, i)
                            i += 1
                            key = _winreg.OpenKey(interfaces, guid)
                            # Skip disabled NICs so stale configuration
                            # doesn't leak into the resolver.
                            if not self._win32_is_nic_enabled(lm, guid, key):
                                continue
                            try:
                                self._config_win32_fromkey(key)
                            finally:
                                key.Close()
                        except EnvironmentError:
                            # EnumKey raises when we run out of subkeys.
                            break
                finally:
                    interfaces.Close()
        finally:
            lm.Close()
    def _win32_is_nic_enabled(self, lm, guid, interface_key):
        # Look in the Windows Registry to determine whether the network
        # interface corresponding to the given guid is enabled.
        #
        # (Code contributed by Paul Marks, thanks!)
        #
        try:
            # This hard-coded location seems to be consistent, at least
            # from Windows 2000 through Vista.
            connection_key = _winreg.OpenKey(
                lm,
                r'SYSTEM\CurrentControlSet\Control\Network'
                r'\{4D36E972-E325-11CE-BFC1-08002BE10318}'
                r'\%s\Connection' % guid)
            try:
                # The PnpInstanceID points to a key inside Enum
                (pnp_id, ttype) = _winreg.QueryValueEx(
                    connection_key, 'PnpInstanceID')
                if ttype != _winreg.REG_SZ:
                    raise ValueError
                device_key = _winreg.OpenKey(
                    lm, r'SYSTEM\CurrentControlSet\Enum\%s' % pnp_id)
                try:
                    # Get ConfigFlags for this device
                    (flags, ttype) = _winreg.QueryValueEx(
                        device_key, 'ConfigFlags')
                    if ttype != _winreg.REG_DWORD:
                        raise ValueError
                    # Based on experimentation, bit 0x1 indicates that the
                    # device is disabled.
                    return not (flags & 0x1)
                finally:
                    device_key.Close()
            finally:
                connection_key.Close()
        except (EnvironmentError, ValueError):
            # Pre-vista, enabled interfaces seem to have a non-empty
            # NTEContextList; this was how dnspython detected enabled
            # nics before the code above was contributed.  We've retained
            # the old method since we don't know if the code above works
            # on Windows 95/98/ME.
            try:
                (nte, ttype) = _winreg.QueryValueEx(interface_key,
                                                    'NTEContextList')
                return nte is not None
            except WindowsError:
                return False
def _compute_timeout(self, start):
now = time.time()
if now < start:
if start - now > 1:
# Time going backwards is bad. Just give up.
raise Timeout
else:
# Time went backwards, but only a little. This can
# happen, e.g. under vmware with older linux kernels.
# Pretend it didn't happen.
now = start
duration = now - start
if duration >= self.lifetime:
raise Timeout
return min(self.lifetime - duration, self.timeout)
    def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
              tcp=False, source=None):
        """Query nameservers to find the answer to the question.

        The I{qname}, I{rdtype}, and I{rdclass} parameters may be objects
        of the appropriate type, or strings that can be converted into objects
        of the appropriate type.  E.g. For I{rdtype} the integer 2 and the
        the string 'NS' both mean to query for records with DNS rdata type NS.

        @param qname: the query name
        @type qname: dns.name.Name object or string
        @param rdtype: the query type
        @type rdtype: int or string
        @param rdclass: the query class
        @type rdclass: int or string
        @param tcp: use TCP to make the query (default is False).
        @type tcp: bool
        @param source: bind to this IP address (defaults to machine default IP).
        @type source: IP address in dotted quad notation
        @rtype: dns.resolver.Answer instance
        @raises Timeout: no answers could be found in the specified lifetime
        @raises NXDOMAIN: the query name does not exist
        @raises NoAnswer: the response did not contain an answer
        @raises NoNameservers: no non-broken nameservers are available to
        answer the question."""
        # Normalize string arguments into their object forms.
        if isinstance(qname, (str, unicode)):
            qname = dns.name.from_text(qname, None)
        if isinstance(rdtype, str):
            rdtype = dns.rdatatype.from_text(rdtype)
        if isinstance(rdclass, str):
            rdclass = dns.rdataclass.from_text(rdclass)
        # Expand a relative qname using the search list (or the local
        # domain); an absolute name is tried as-is.
        qnames_to_try = []
        if qname.is_absolute():
            qnames_to_try.append(qname)
        else:
            if len(qname) > 1:
                qnames_to_try.append(qname.concatenate(dns.name.root))
            if self.search:
                for suffix in self.search:
                    qnames_to_try.append(qname.concatenate(suffix))
            else:
                qnames_to_try.append(qname.concatenate(self.domain))
        all_nxdomain = True
        start = time.time()
        for qname in qnames_to_try:
            if self.cache:
                answer = self.cache.get((qname, rdtype, rdclass))
                if answer:
                    return answer
            request = dns.message.make_query(qname, rdtype, rdclass)
            if not self.keyname is None:
                request.use_tsig(self.keyring, self.keyname, self.keyalgorithm)
            request.use_edns(self.edns, self.ednsflags, self.payload)
            response = None
            #
            # make a copy of the servers list so we can alter it later.
            #
            nameservers = self.nameservers[:]
            backoff = 0.10
            # Retry rounds: each round tries every remaining server once,
            # then sleeps with exponential backoff (capped by the
            # remaining lifetime) before the next round.
            while response is None:
                if len(nameservers) == 0:
                    raise NoNameservers
                for nameserver in nameservers[:]:
                    timeout = self._compute_timeout(start)
                    try:
                        if tcp:
                            response = dns.query.tcp(request, nameserver,
                                                     timeout, self.port,
                                                     source=source)
                        else:
                            response = dns.query.udp(request, nameserver,
                                                     timeout, self.port,
                                                     source=source)
                    except (socket.error, dns.exception.Timeout):
                        #
                        # Communication failure or timeout.  Go to the
                        # next server
                        #
                        response = None
                        continue
                    except dns.query.UnexpectedSource:
                        #
                        # Who knows?  Keep going.
                        #
                        response = None
                        continue
                    except dns.exception.FormError:
                        #
                        # We don't understand what this server is
                        # saying.  Take it out of the mix and
                        # continue.
                        #
                        nameservers.remove(nameserver)
                        response = None
                        continue
                    rcode = response.rcode()
                    if rcode == dns.rcode.NOERROR or \
                           rcode == dns.rcode.NXDOMAIN:
                        break
                    #
                    # We got a response, but we're not happy with the
                    # rcode in it.  Remove the server from the mix if
                    # the rcode isn't SERVFAIL.
                    #
                    if rcode != dns.rcode.SERVFAIL:
                        nameservers.remove(nameserver)
                    response = None
                if not response is None:
                    break
                #
                # All nameservers failed!
                #
                if len(nameservers) > 0:
                    #
                    # But we still have servers to try.  Sleep a bit
                    # so we don't pound them!
                    #
                    timeout = self._compute_timeout(start)
                    sleep_time = min(timeout, backoff)
                    backoff *= 2
                    time.sleep(sleep_time)
            # NXDOMAIN for this candidate name: try the next search-list
            # expansion; any other outcome ends the search.
            if response.rcode() == dns.rcode.NXDOMAIN:
                continue
            all_nxdomain = False
            break
        if all_nxdomain:
            raise NXDOMAIN
        answer = Answer(qname, rdtype, rdclass, response)
        if self.cache:
            self.cache.put((qname, rdtype, rdclass), answer)
        return answer
def use_tsig(self, keyring, keyname=None,
algorithm=dns.tsig.default_algorithm):
"""Add a TSIG signature to the query.
@param keyring: The TSIG keyring to use; defaults to None.
@type keyring: dict
@param keyname: The name of the TSIG key to use; defaults to None.
The key must be defined in the keyring. If a keyring is specified
but a keyname is not, then the key used will be the first key in the
keyring. Note that the order of keys in a dictionary is not defined,
so applications should supply a keyname when a keyring is used, unless
they know the keyring contains only one key.
@param algorithm: The TSIG key algorithm to use. The default
is dns.tsig.default_algorithm.
@type algorithm: string"""
self.keyring = keyring
if keyname is None:
self.keyname = self.keyring.keys()[0]
else:
self.keyname = keyname
self.keyalgorithm = algorithm
def use_edns(self, edns, ednsflags, payload):
"""Configure Edns.
@param edns: The EDNS level to use. The default is -1, no Edns.
@type edns: int
@param ednsflags: The EDNS flags
@type ednsflags: int
@param payload: The EDNS payload size. The default is 0.
@type payload: int"""
if edns is None:
edns = -1
self.edns = edns
self.ednsflags = ednsflags
self.payload = payload
# Lazily-created module-wide resolver shared by the convenience functions
# below; see get_default_resolver().
default_resolver = None
def get_default_resolver():
    """Get the default resolver, initializing it if necessary."""
    global default_resolver
    # Created lazily so merely importing this module doesn't read
    # /etc/resolv.conf or the Windows registry.
    if default_resolver is None:
        default_resolver = Resolver()
    return default_resolver
def query(qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
          tcp=False, source=None):
    """Query nameservers to find the answer to the question.

    This is a convenience function that uses the default resolver
    object to make the query.
    @see: L{dns.resolver.Resolver.query} for more information on the
    parameters."""
    # Delegates to the shared module-level resolver (and thus its cache
    # and configuration, if any).
    return get_default_resolver().query(qname, rdtype, rdclass, tcp, source)
def zone_for_name(name, rdclass=dns.rdataclass.IN, tcp=False, resolver=None):
    """Find the name of the zone which contains the specified name.

    Walks upward from *name*, one label at a time, querying for an SOA
    record; the first name that has one is the zone apex.

    @param name: the query name
    @type name: absolute dns.name.Name object or string
    @param rdclass: The query class
    @type rdclass: int
    @param tcp: use TCP to make the query (default is False).
    @type tcp: bool
    @param resolver: the resolver to use
    @type resolver: dns.resolver.Resolver object or None
    @rtype: dns.name.Name
    @raises NotAbsolute: I{name} is not an absolute name
    @raises NoRootSOA: no SOA was found even at the root (should never
    happen)"""
    if isinstance(name, (str, unicode)):
        name = dns.name.from_text(name, dns.name.root)
    if resolver is None:
        resolver = get_default_resolver()
    if not name.is_absolute():
        raise NotAbsolute(name)
    while True:
        try:
            # The answer's content is irrelevant; the presence of an SOA
            # is what marks *name* as a zone apex.  (The previous code
            # bound it to an unused local.)
            resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
            return name
        except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
            try:
                name = name.parent()
            except dns.name.NoParent:
                raise NoRootSOA
| bsd-3-clause |
suiyuan2009/tensorflow | tensorflow/contrib/bayesflow/python/ops/stochastic_graph.py | 81 | 1256 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for Stochastic Computation Graphs.
See the @{$python/contrib.bayesflow.stochastic_graph} guide.
@@surrogate_loss
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.bayesflow.python.ops.stochastic_graph_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
# Public names that survive the documentation scrub below; everything else
# pulled in by the wildcard import is stripped from the module namespace.
_allowed_symbols = [
    "surrogate_loss"
]
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
kaksmet/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/stream.py | 673 | 2748 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This file exports public symbols.
"""
from mod_pywebsocket._stream_base import BadOperationException
from mod_pywebsocket._stream_base import ConnectionTerminatedException
from mod_pywebsocket._stream_base import InvalidFrameException
from mod_pywebsocket._stream_base import InvalidUTF8Exception
from mod_pywebsocket._stream_base import UnsupportedFrameException
from mod_pywebsocket._stream_hixie75 import StreamHixie75
from mod_pywebsocket._stream_hybi import Frame
from mod_pywebsocket._stream_hybi import Stream
from mod_pywebsocket._stream_hybi import StreamOptions
# These methods are intended to be used by WebSocket client developers to have
# their implementations receive broken data in tests.
from mod_pywebsocket._stream_hybi import create_close_frame
from mod_pywebsocket._stream_hybi import create_header
from mod_pywebsocket._stream_hybi import create_length_header
from mod_pywebsocket._stream_hybi import create_ping_frame
from mod_pywebsocket._stream_hybi import create_pong_frame
from mod_pywebsocket._stream_hybi import create_binary_frame
from mod_pywebsocket._stream_hybi import create_text_frame
from mod_pywebsocket._stream_hybi import create_closing_handshake_body
# vi:sts=4 sw=4 et
| mpl-2.0 |
IDSIA/sacred | examples/02_hello_config_dict.py | 1 | 1196 | #!/usr/bin/env python
# coding=utf-8
""" A configurable Hello World "experiment".
In this example we configure the message using a dictionary with
``ex.add_config``
You can run it like this::
$ ./02_hello_config_dict.py
WARNING - 02_hello_config_dict - No observers have been added to this run
INFO - 02_hello_config_dict - Running command 'main'
INFO - 02_hello_config_dict - Started
Hello world!
INFO - 02_hello_config_dict - Completed after 0:00:00
The message can also easily be changed using the ``with`` command-line
argument::
$ ./02_hello_config_dict.py with message='Ciao world!'
WARNING - 02_hello_config_dict - No observers have been added to this run
INFO - 02_hello_config_dict - Running command 'main'
INFO - 02_hello_config_dict - Started
Ciao world!
INFO - 02_hello_config_dict - Completed after 0:00:00
"""
from sacred import Experiment
# Create the experiment; Sacred derives its name from this script's file name.
ex = Experiment()
# We add message to the configuration of the experiment here
ex.add_config({"message": "Hello world!"})
# Equivalent keyword-argument form:
# ex.add_config(
#     message="Hello world!"
# )
# notice how we can access the message here by taking it as an argument
@ex.automain
def main(message):
    """Print the configured ``message``; Sacred injects it from the config."""
    print(message)
| mit |
espadrine/opera | chromium/src/tools/perf_expectations/PRESUBMIT.py | 129 | 1407 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for perf_expectations.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API built into gcl.
"""
# Repo-relative paths that, when touched by a change, trigger the unit tests.
PERF_EXPECTATIONS = 'tools/perf_expectations/perf_expectations.json'
CONFIG_FILE = 'tools/perf_expectations/chromium_perf_expectations.cfg'
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit hook run at upload time.

  Runs the perf-expectations unit tests when the expectations JSON or the
  config file is part of the change; otherwise reports nothing.
  """
  watched = (PERF_EXPECTATIONS, CONFIG_FILE)
  results = []
  if any(path in watched for path in input_api.LocalPaths()):
    results.extend(input_api.canned_checks.RunUnitTestsInDirectory(
        input_api, output_api, 'tests', [r'.+_unittest\.py$']))
  return results
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit hook run at commit time.

  Same unit-test gating as upload, plus the canned DO NOT SUBMIT check,
  which always runs regardless of which files changed.
  """
  watched = (PERF_EXPECTATIONS, CONFIG_FILE)
  results = []
  if any(path in watched for path in input_api.LocalPaths()):
    results.extend(input_api.canned_checks.RunUnitTestsInDirectory(
        input_api, output_api, 'tests', [r'.+_unittest\.py$']))
  results.extend(input_api.canned_checks.CheckDoNotSubmit(input_api,
                                                          output_api))
  return results
| bsd-3-clause |
sander76/home-assistant | homeassistant/components/lyft/sensor.py | 5 | 8933 | """Support for the Lyft API."""
from datetime import timedelta
import logging
from lyft_rides.auth import ClientCredentialGrant
from lyft_rides.client import LyftRidesClient
from lyft_rides.errors import APIError
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET, TIME_MINUTES
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
# Configuration keys accepted by this platform.
CONF_END_LATITUDE = "end_latitude"
CONF_END_LONGITUDE = "end_longitude"
CONF_PRODUCT_IDS = "product_ids"
CONF_START_LATITUDE = "start_latitude"
CONF_START_LONGITUDE = "start_longitude"
ICON = "mdi:taxi"
# Minimum interval between polls of the Lyft API (enforced via @Throttle).
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
# Start coordinates fall back to the Home Assistant home location when omitted;
# end coordinates are optional (price sensors only exist when both are set).
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_CLIENT_ID): cv.string,
        vol.Required(CONF_CLIENT_SECRET): cv.string,
        vol.Optional(CONF_START_LATITUDE): cv.latitude,
        vol.Optional(CONF_START_LONGITUDE): cv.longitude,
        vol.Optional(CONF_END_LATITUDE): cv.latitude,
        vol.Optional(CONF_END_LONGITUDE): cv.longitude,
        vol.Optional(CONF_PRODUCT_IDS): vol.All(cv.ensure_list, [cv.string]),
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Lyft sensor.

    Authenticates against the Lyft API, performs one initial fetch, then
    creates a "time" sensor per ride type (and a "price" sensor too when a
    cost estimate is available). Returns False when setup fails.
    """
    auth_flow = ClientCredentialGrant(
        client_id=config.get(CONF_CLIENT_ID),
        client_secret=config.get(CONF_CLIENT_SECRET),
        scopes="public",
        is_sandbox_mode=False,
    )
    try:
        session = auth_flow.get_session()
        # Start point defaults to the Home Assistant home coordinates.
        timeandpriceest = LyftEstimate(
            session,
            config.get(CONF_START_LATITUDE, hass.config.latitude),
            config.get(CONF_START_LONGITUDE, hass.config.longitude),
            config.get(CONF_END_LATITUDE),
            config.get(CONF_END_LONGITUDE),
        )
        # Initial fetch both validates credentials and populates .products.
        timeandpriceest.fetch_data()
    except APIError as exc:
        _LOGGER.error("Error setting up Lyft platform: %s", exc)
        return False
    wanted_product_ids = config.get(CONF_PRODUCT_IDS)
    dev = []
    for product_id, product in timeandpriceest.products.items():
        # Honor the optional product_ids allow-list from the config.
        if (wanted_product_ids is not None) and (product_id not in wanted_product_ids):
            continue
        dev.append(LyftSensor("time", timeandpriceest, product_id, product))
        # Price sensors only make sense when a cost estimate exists
        # (i.e. an end point was configured and the API returned one).
        if product.get("estimate") is not None:
            dev.append(LyftSensor("price", timeandpriceest, product_id, product))
    add_entities(dev, True)
class LyftSensor(SensorEntity):
    """Implementation of an Lyft sensor.

    One instance per (ride type, metric) pair; ``sensorType`` is either
    "time" (pickup ETA in minutes) or "price" (midpoint cost estimate).
    """

    def __init__(self, sensorType, products, product_id, product):
        """Initialize the Lyft sensor."""
        # `products` is the shared LyftEstimate; all sensors poll through it.
        self.data = products
        self._product_id = product_id
        self._product = product
        self._sensortype = sensorType
        self._name = f"{self._product['display_name']} {self._sensortype}"
        # Prefix with "Lyft" unless the display name already mentions it.
        if "lyft" not in self._name.lower():
            self._name = f"Lyft{self._name}"
        if self._sensortype == "time":
            self._unit_of_measurement = TIME_MINUTES
        elif self._sensortype == "price":
            # NOTE(review): price sensors are only created when an estimate
            # exists (see setup_platform), so `estimate` is expected non-None.
            estimate = self._product["estimate"]
            if estimate is not None:
                self._unit_of_measurement = estimate.get("currency")
        self._state = None

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement

    @property
    def extra_state_attributes(self):
        """Return the state attributes."""
        params = {
            "Product ID": self._product["ride_type"],
            "Product display name": self._product["display_name"],
            "Vehicle Capacity": self._product["seats"],
        }
        if self._product.get("pricing_details") is not None:
            pricing_details = self._product["pricing_details"]
            params["Base price"] = pricing_details.get("base_charge")
            params["Cancellation fee"] = pricing_details.get("cancel_penalty_amount")
            params["Minimum price"] = pricing_details.get("cost_minimum")
            params["Cost per mile"] = pricing_details.get("cost_per_mile")
            params["Cost per minute"] = pricing_details.get("cost_per_minute")
            params["Price currency code"] = pricing_details.get("currency")
            params["Service fee"] = pricing_details.get("trust_and_service")
        if self._product.get("estimate") is not None:
            estimate = self._product["estimate"]
            params["Trip distance (in miles)"] = estimate.get(
                "estimated_distance_miles"
            )
            params["High price estimate (in cents)"] = estimate.get(
                "estimated_cost_cents_max"
            )
            params["Low price estimate (in cents)"] = estimate.get(
                "estimated_cost_cents_min"
            )
            params["Trip duration (in seconds)"] = estimate.get(
                "estimated_duration_seconds"
            )
            params["Prime Time percentage"] = estimate.get("primetime_percentage")
        if self._product.get("eta") is not None:
            eta = self._product["eta"]
            params["Pickup time estimate (in seconds)"] = eta.get("eta_seconds")
        # Drop attributes the API did not supply.
        return {k: v for k, v in params.items() if v is not None}

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return ICON

    def update(self):
        """Get the latest data from the Lyft API and update the states."""
        self.data.update()
        try:
            self._product = self.data.products[self._product_id]
        except KeyError:
            # Ride type vanished from the API response; keep the last product
            # snapshot and leave the state untouched.
            return
        self._state = None
        if self._sensortype == "time":
            eta = self._product["eta"]
            if (eta is not None) and (eta.get("is_valid_estimate")):
                time_estimate = eta.get("eta_seconds")
                if time_estimate is None:
                    return
                self._state = int(time_estimate / 60)
        elif self._sensortype == "price":
            estimate = self._product["estimate"]
            if (estimate is not None) and estimate.get("is_valid_estimate"):
                # Midpoint of the min/max estimate, converted cents -> dollars.
                self._state = (
                    int(
                        (
                            estimate.get("estimated_cost_cents_min", 0)
                            + estimate.get("estimated_cost_cents_max", 0)
                        )
                        / 2
                    )
                    / 100
                )
class LyftEstimate:
    """The class for handling the time and price estimate.

    Shared by all LyftSensor entities; holds the latest per-ride-type data
    in ``self.products`` keyed by ride_type.
    """

    def __init__(
        self,
        session,
        start_latitude,
        start_longitude,
        end_latitude=None,
        end_longitude=None,
    ):
        """Initialize the LyftEstimate object."""
        self._session = session
        self.start_latitude = start_latitude
        self.start_longitude = start_longitude
        self.end_latitude = end_latitude
        self.end_longitude = end_longitude
        # Populated by fetch_data(): {ride_type: product dict}.
        self.products = None

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Get the latest product info and estimates from the Lyft API."""
        try:
            self.fetch_data()
        except APIError as exc:
            # Keep the previous data on transient API failures.
            _LOGGER.error("Error fetching Lyft data: %s", exc)

    def fetch_data(self):
        """Get the latest product info and estimates from the Lyft API."""
        client = LyftRidesClient(self._session)
        self.products = {}
        products_response = client.get_ride_types(
            self.start_latitude, self.start_longitude
        )
        products = products_response.json.get("ride_types")
        for product in products:
            self.products[product["ride_type"]] = product
        # Cost estimates require a destination; skip them otherwise.
        if self.end_latitude is not None and self.end_longitude is not None:
            price_response = client.get_cost_estimates(
                self.start_latitude,
                self.start_longitude,
                self.end_latitude,
                self.end_longitude,
            )
            prices = price_response.json.get("cost_estimates", [])
            for price in prices:
                product = self.products[price["ride_type"]]
                if price.get("is_valid_estimate"):
                    product["estimate"] = price
        eta_response = client.get_pickup_time_estimates(
            self.start_latitude, self.start_longitude
        )
        etas = eta_response.json.get("eta_estimates")
        for eta in etas:
            if eta.get("is_valid_estimate"):
                self.products[eta["ride_type"]]["eta"] = eta
| apache-2.0 |
qpython-android/QPypi-numpy | numpy/numarray/ufuncs.py | 102 | 1358 |
__all__ = ['abs', 'absolute', 'add', 'arccos', 'arccosh', 'arcsin', 'arcsinh',
'arctan', 'arctan2', 'arctanh', 'bitwise_and', 'bitwise_not',
'bitwise_or', 'bitwise_xor', 'ceil', 'cos', 'cosh', 'divide',
'equal', 'exp', 'fabs', 'floor', 'floor_divide',
'fmod', 'greater', 'greater_equal', 'hypot', 'isnan',
'less', 'less_equal', 'log', 'log10', 'logical_and', 'logical_not',
'logical_or', 'logical_xor', 'lshift', 'maximum', 'minimum',
'minus', 'multiply', 'negative', 'not_equal',
'power', 'product', 'remainder', 'rshift', 'sin', 'sinh', 'sqrt',
'subtract', 'sum', 'tan', 'tanh', 'true_divide',
'conjugate', 'sign']
from numpy import absolute as abs, absolute, add, arccos, arccosh, arcsin, \
arcsinh, arctan, arctan2, arctanh, bitwise_and, invert as bitwise_not, \
bitwise_or, bitwise_xor, ceil, cos, cosh, divide, \
equal, exp, fabs, floor, floor_divide, fmod, greater, greater_equal, \
hypot, isnan, less, less_equal, log, log10, logical_and, \
logical_not, logical_or, logical_xor, left_shift as lshift, \
maximum, minimum, negative as minus, multiply, negative, \
not_equal, power, product, remainder, right_shift as rshift, sin, \
sinh, sqrt, subtract, sum, tan, tanh, true_divide, conjugate, sign
| bsd-3-clause |
odicraig/kodi2odi | addons/plugin.radio.disneyjr/default.py | 2 | 7191 | #############################################################################
#############################################################################
import common
from common import *
from common import (addon_id,addon_name,addon_path)
#############################################################################
#############################################################################
ACTION_PREVIOUS_MENU = 10 ## ESC action
ACTION_NAV_BACK = 92 ## Backspace action
ACTION_MOVE_LEFT = 1 ## Left arrow key
ACTION_MOVE_RIGHT = 2 ## Right arrow key
ACTION_MOVE_UP = 3 ## Up arrow key
ACTION_MOVE_DOWN = 4 ## Down arrow key
ACTION_MOUSE_WHEEL_UP = 104 ## Mouse wheel up
ACTION_MOUSE_WHEEL_DOWN = 105 ## Mouse wheel down
ACTION_MOUSE_DRAG = 106 ## Mouse drag
ACTION_MOUSE_MOVE = 107 ## Mouse move
#
ACTION_KEY_P = 79 ## P - Pause
ACTION_KEY_R = 78 ## R - Rewind
ACTION_KEY_F = 77 ## F - Fast Forward
ACTION_SELECT_ITEM = 7 ## ?
ACTION_PARENT_DIR = 9 ## ?
ACTION_CONTEXT_MENU = 117 ## ?
ACTION_NEXT_ITEM = 14 ## ?
ACTION_BACKSPACE = 110 ## ?
#
ACTION_KEY_X = 13 ## X - Stop
ACTION_aID_0 = 0 ## ???
#
ACTION_REMOTE_MUTE = 91 ## MUTE
#ACTION_REMOTE_FULLSCREEN = ?? ## FullScreen
ACTION_REMOTE_INFO = 11 ## Info
ACTION_REMOTE_PLAYPAUSE = 12 ## Play / Pause
ACTION_REMOTE_CONTEXTMENU = 117 ## Context Menu
ACTION_REMOTE_STOP = 13 ## Stop
#
ACTION_KEY_VOL_MINUS = 89 ## F - Fast Forward
ACTION_KEY_VOL_PLUS = 88 ## F - Fast Forward
#
ACTION_SHOW_FULLSCREEN = 36 ## Show Full Screen
ACTION_TOGGLE_FULLSCREEN = 199 ## Toggle Full Screen
#############################################################################
#############################################################################
d=xbmcgui.Dialog();
class CustomWindow(xbmcgui.WindowXML):
#class CustomWindow(xbmcgui.WindowXMLDialog):
    """Skinned Kodi window driven by the XML file passed to __init__.

    Control IDs are resolved in LoadSkinItems(); input is handled in
    onClick()/onAction(). Errors are deliberately swallowed throughout so a
    broken skin never crashes the addon.
    """
    # Class-level defaults; overwritten per-instance during onInit().
    closing=False; firsttime=False; c={}; strXMLname=''; strFallbackPath='';
    ##
    def __init__(self,strXMLname,strFallbackPath):
        self.strXMLname=strXMLname
        self.strFallbackPath=strFallbackPath
    ##
    def onInit(self):
        # Called by Kodi once the XML window is loaded and controls exist.
        try: self.wID=xbmcgui.getCurrentWindowId()
        except: self.wID=0
        deb('CurrentWindowId()',str(self.wID));
        deb('getResolution()',str(self.getResolution()));
        self.firsttime=True
        self.LoadSkinItems()
        self.setupScreen()
        try: self.setFocus(self.bExit)
        except: pass
        pass
    ##
    def setupScreen(self):
        # Target skin resolution; image assignments are currently disabled.
        maxW=1280; maxH=720;
        #self.iBack.setImage(artp("black1"));
        #self.iBackground.setImage(MediaFile("snow_town_02b.gif"));
    ##
    def LoadSkinItems(self):
        # Map logical names to skin control IDs, then resolve each control.
        # Every lookup is individually guarded so a missing control in the
        # skin XML only disables that one feature.
        try:
            self.c['iBack']=1;
            self.c['iBackground']=2;
            self.c['bExit']=10;
            self.c['bPlayMP3']=14;
            self.c['bPlayMP4']=13;
            self.c['bStop']=11;
            try: self.iBack=self.getControl(self.c['iBack']);
            except: pass
            try: self.iBackground=self.getControl(self.c['iBackground']);
            except: pass
            try: self.bExit=self.getControl(self.c['bExit']);
            except: pass
            try: self.bPlayMP3=self.getControl(self.c['bPlayMP3']);
            except: pass
            try: self.bPlayMP4=self.getControl(self.c['bPlayMP4']);
            except: pass
            try: self.bStop=self.getControl(self.c['bStop']);
            except: pass
            #self.c['TestObj1']=5;
            #self.c['TestObj2']=4;
            #try: self.TestObj1=self.getControl(self.c['TestObj1']);
            #except: pass
            #try: self.TestObj2=self.getControl(self.c['TestObj2']);
            #except: pass
            #self.TestObj1.setPosition(1117,0)
            #self.TestObj2.setPosition(0,620)
        except: pass
    ##
    def onClick(self,controlId):
        # Only the exit button is wired up.
        try:
            if controlId==self.c['bExit']: self.AskToClose()
        except Exception,e: debob(["Error",e])
        except: pass
    def onAction(self,action):
        # Back/escape actions prompt for exit; all other handlers are stubs.
        try:
            actId=int(action.getId()); actIds=str(action.getId()); actBC=str(action.getButtonCode()); xx=0; yy=0;
            try: actAmnt1=action.getAmount1()
            except: pass
            try: actAmnt2=action.getAmount2()
            except: pass
            if action==ACTION_PREVIOUS_MENU: self.AskToClose()
            elif action==ACTION_NAV_BACK: self.AskToClose()
            # elif action==ACTION_MOVE_LEFT: #1
            #     debob({'getId':actId,'getButtonCode':actBC,'getAmount1':actAmnt1,'getAmount2':actAmnt2})
            #     pass
            # elif action==ACTION_MOVE_RIGHT: #2
            #     debob({'getId':actId,'getButtonCode':actBC,'getAmount1':actAmnt1,'getAmount2':actAmnt2})
            #     pass
            # elif action==ACTION_MOVE_UP: #3
            #     debob({'getId':actId,'getButtonCode':actBC,'getAmount1':actAmnt1,'getAmount2':actAmnt2})
            #     pass
            # elif action==ACTION_MOVE_DOWN: #4
            #     debob({'getId':actId,'getButtonCode':actBC,'getAmount1':actAmnt1,'getAmount2':actAmnt2})
            #     pass
            # elif action==ACTION_MOUSE_WHEEL_UP: #104
            #     debob({'getId':actId,'getButtonCode':actBC,'getAmount1':actAmnt1,'getAmount2':actAmnt2})
            #     pass
            # elif action==ACTION_MOUSE_WHEEL_DOWN: #105
            #     debob({'getId':actId,'getButtonCode':actBC,'getAmount1':actAmnt1,'getAmount2':actAmnt2})
            #     pass
            # elif action==ACTION_MOUSE_MOVE: #107
            #     #debob({'action type':'MOUSE MOVE','getId':actId,'getButtonCode':actBC,'getAmount1':actAmnt1,'getAmount2':actAmnt2})
            #     pass
            # elif action==ACTION_MOUSE_DRAG: #106
            #     #debob({'getId':actId,'getButtonCode':actBC,'getAmount1':actAmnt1,'getAmount2':actAmnt2})
            #     pass
            # elif actId == 100:
            #     #deb("Remote Button Pressed","100"); deb('action.getId',str(actIds));
            #     pass
            # elif actId == ACTION_aID_0: pass
            # elif actId == ACTION_KEY_R: return
            # elif actId == ACTION_KEY_F: return
            # elif actId == ACTION_REMOTE_INFO: return
            # elif actId == ACTION_REMOTE_MUTE: return
            # elif actId == ACTION_REMOTE_CONTEXTMENU: return
            # elif actId == ACTION_REMOTE_PLAYPAUSE: return
            # elif actId == ACTION_REMOTE_STOP: return
            # elif actId == ACTION_KEY_X: return
            # else:
            #     if not actId==0:
            #         pass
            #     ##
            #     ##
            #     ##
        except Exception,e: debob(["Error",e]); debob([actId,actIds,actBC])
        except: pass
    def CloseWindow(self):
        try:
            self.closing=True;
        except: pass
        self.close()
    def CW(self): self.CloseWindow()
    def AskToClose(self):
        # NOTE(review): 'addonName' is not imported here (the explicit import
        # is 'addon_name'); unless the star import from common provides it,
        # the yesno prompt raises NameError and is silently swallowed by the
        # bare except below -- verify against common.py.
        try:
            if self.closing==False:
                if d.yesno(addonName," ","Are you sure that you want to exit?","","No","Yes"): self.closing=True; self.CloseWindow()
            else: self.CloseWindow()
        except: pass
    ##
######
#############################################################################
#############################################################################
# Skin XML file that defines the window layout and control IDs.
skinFilename='CustomWindow001.xml'
# xbmcgui.Emulating only exists in desktop emulation environments.
try: Emulating=xbmcgui.Emulating
except: Emulating=False
if __name__=='__main__':
    #cWind=CustomWindow(skinFilename,addon_path,'default')
    cWind=CustomWindow(skinFilename,addon_path) #,'default'
    # doModal() blocks until the window closes itself.
    cWind.doModal()
    del cWind
    sys.modules.clear()
#############################################################################
#############################################################################
| gpl-3.0 |
0xMF/alpha | paucore/stats/statsd_client.py | 3 | 5264 | import socket
import binascii
import re
import logging
import time
from contextlib import contextmanager
from functools import wraps
from django.conf import settings
from paucore.stats.slowjam import slowjam_context
logger = logging.getLogger(__name__)
class DummySocket(object):
    """No-op stand-in for a UDP socket that silently discards every packet.

    Swapped in under UNIT_TESTING so tests never generate network traffic.
    """

    def sendto(self, value, addr):
        # Intentionally drop the payload; mirrors socket.socket.sendto's shape.
        return None
class StatsdClient(object):
    """A very basic client for aggregating application stats to statsd/graphite

    Some guiding ideas:
    1- Always log stats to a bucket based on the current machine (and region)
    2- Most stats are based on the current application and evironment so make that the default prefix
    3- Some metrics don't fit neatly within an application bucket (ex: size of message queues). Allow overriding the
    prefix/bucket so the stat gets logged to somewhere other than #2
    """

    def __init__(self, hosts, app, environment, region, machine):
        """Resolve the statsd hosts and capture deployment identity.

        hosts: iterable of "host:port" strings; each is resolved once here.
        """
        if settings.UNIT_TESTING:
            self.sock = DummySocket()
        else:
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        addrs = []
        for host in hosts:
            host, port = host.split(':', 1)
            port = int(port)
            addrs.append((socket.gethostbyname(host), port))
        self.addrs = addrs
        self.count = len(addrs)
        self.app = app
        self.environment = environment
        self.region = region
        # Graphite uses '.' as its path separator, so sanitize the hostname.
        self.machine = machine.replace(".", "-")

    def get_addr(self, stat):
        """Shard *stat* onto one of the statsd hosts with a stable hash."""
        # Mask to 32 bits so the result is consistent across platforms.
        return self.addrs[(binascii.crc32(stat) & 0xffffffff) % self.count]

    def make_prefix(self, prefix):
        """Return the metric bucket: "<environment>.<app>" unless overridden."""
        if prefix is None:
            return '%s.%s' % (self.environment, self.app)
        else:
            return prefix

    def graphite_duration(self, stat, elapsed_time, prefix=None, fmt=None, extras=None):
        """Record a timing (milliseconds) for *stat*."""
        try:
            self.sock.sendto('all.%s.%s:%d|ms' % (self.make_prefix(prefix), stat, elapsed_time), self.get_addr(stat))
        except Exception:
            # Stats are best-effort; never let metrics reporting break the
            # app. (Narrowed from a bare except so SystemExit and
            # KeyboardInterrupt still propagate.)
            logger.exception('Dropping graphite_duration stat=%s on floor:', stat)

    def graphite_count(self, stat, delta, prefix=None, fmt=None, extras=None):
        """Adjust counter *stat* by *delta*."""
        try:
            self.sock.sendto('all.%s.%s:%d|c' % (self.make_prefix(prefix), stat, delta), self.get_addr(stat))
        except Exception:
            logger.exception('Dropping graphite_count=%s on floor:', stat)

    def graphite_gauge(self, stat, value, prefix=None):
        """Set gauge *stat* to *value*."""
        # n.b., you should probably only use these from guaranteed-unique tasks
        try:
            self.sock.sendto('all.%s.%s:%d|g' % (self.make_prefix(prefix), stat, value), self.get_addr(stat))
        except Exception:
            logger.exception('Dropping graphite_gauge=%s on floor:', stat)

    def graphite_set(self, stat, value, prefix=None):
        """Add *value* to the unique-set metric *stat*."""
        # only put id-like values in here
        try:
            self.sock.sendto('all.%s.%s:%s|s' % (self.make_prefix(prefix), stat, value), self.get_addr(stat))
        except Exception:
            logger.exception('Dropping graphite_set=%s on floor:', stat)
# Pull deployment identity from Django settings, defaulting every field so
# the module still imports cleanly in unconfigured/dev environments.
app = getattr(settings, 'HOSTING_APP', 'unknown')
environment = getattr(settings, 'ENVIRONMENT', 'unknown')
region = getattr(settings, 'REGION', 'unknown')
machine = getattr(settings, 'SERVER_HOSTNAME', 'unknown')
hosts = getattr(settings, 'STATSD_HOSTS', ['localhost:8125'])
# Module-level singleton used by the convenience functions below.
_instance = StatsdClient(hosts, app, environment, region, machine)
def graphite_duration(stat, elapsed_time, prefix=None):
    """Record a timing for *stat* on the shared client, if one exists."""
    # TODO: Hook this up to slowjam?
    if not _instance:
        return
    _instance.graphite_duration(stat, elapsed_time, prefix)
def graphite_count(stat, delta=1, prefix=None, fmt=None, extras=None, tag=None):
    """Adjust counter *stat* by *delta*, also marking the slowjam context."""
    slowjam_context.mark(stat, fmt, extras, tag)
    if not _instance:
        return
    _instance.graphite_count(stat, delta, prefix)
def graphite_increment(stats, prefix=None):
    """Convenience wrapper: bump *stats* by one."""
    graphite_count(stats, delta=1, prefix=prefix)
def graphite_decrement(stats, prefix=None):
    """Convenience wrapper: decrease *stats* by one."""
    graphite_count(stats, delta=-1, prefix=prefix)
def graphite_gauge(stat, value, prefix=None):
    """Set gauge *stat* to *value* on the shared client, if one exists."""
    if not _instance:
        return
    _instance.graphite_gauge(stat, value, prefix=prefix)
def graphite_set(stat, value, prefix=None):
    """Add *value* to unique-set metric *stat* on the shared client."""
    if not _instance:
        return
    _instance.graphite_set(stat, value, prefix=prefix)
@contextmanager
def graphite_timer(stat, label=None, logger=logging.getLogger('timer'), prefix=None, extras=None, tag=None):
    """Time the managed body, then report the duration to statsd.

    Yields the slowjam event context. The duration is only recorded when
    the body exits without raising; with *label* set it is also logged.
    """
    started = time.time()
    with slowjam_context.event(stat, fmt=label, extras=extras, tag=tag) as ctx:
        yield ctx
    elapsed_ms = (time.time() - started) * 1000
    if _instance:
        _instance.graphite_duration(stat, elapsed_ms, prefix)
    if label:
        logger.info("%s took %dms" % (label, elapsed_ms))
def timer(stat, label=None, logger=logging.getLogger('timer'), prefix=None, extras=None, tag=None, log_args=False):
    """Decorator form of graphite_timer.

    With log_args=True, the call's positional and keyword arguments are
    copied into the slowjam extras for each invocation.
    """
    def timer_decorator(f):
        @wraps(f)
        def wrapped_f(*args, **kwargs):
            # Copy so per-call additions never mutate the shared extras dict.
            call_extras = dict(extras) if extras else {}
            if log_args:
                if args:
                    call_extras['args'] = args
                if kwargs:
                    call_extras['kwargs'] = kwargs
            with graphite_timer(stat, label=label, logger=logger, prefix=prefix, extras=call_extras, tag=tag):
                return f(*args, **kwargs)
        return wrapped_f
    return timer_decorator
| mit |
GbalsaC/bitnamiP | common/djangoapps/util/tests/test_milestones_helpers.py | 76 | 4004 | """
Tests for the milestones helpers library, which is the integration point for the edx_milestones API
"""
from mock import patch
from milestones.exceptions import InvalidCourseKeyException, InvalidUserException
from util import milestones_helpers
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
# The class-level patch disables the milestones feature flag, so every test
# (except the last, which re-enables it) asserts the helpers' no-op behavior.
@patch.dict('django.conf.settings.FEATURES', {'MILESTONES_APP': False})
class MilestonesHelpersTestCase(ModuleStoreTestCase):
    """
    Main test suite for Milestones API client library
    """

    def setUp(self):
        """
        Test case scaffolding
        """
        super(MilestonesHelpersTestCase, self).setUp(create_user=False)
        self.course = CourseFactory.create(
            metadata={
                'entrance_exam_enabled': True,
            }
        )
        self.user = {'id': '123'}
        self.milestone = {
            'name': 'Test Milestone',
            'namespace': 'doesnt.matter',
            'description': 'Testing Milestones Helpers Library',
        }

    def test_add_milestone_returns_none_when_app_disabled(self):
        response = milestones_helpers.add_milestone(milestone_data=self.milestone)
        self.assertIsNone(response)

    def test_get_milestones_returns_none_when_app_disabled(self):
        response = milestones_helpers.get_milestones(namespace="whatever")
        self.assertEqual(len(response), 0)

    def test_get_milestone_relationship_types_returns_none_when_app_disabled(self):
        response = milestones_helpers.get_milestone_relationship_types()
        self.assertEqual(len(response), 0)

    def test_add_course_milestone_returns_none_when_app_disabled(self):
        response = milestones_helpers.add_course_milestone(unicode(self.course.id), 'requires', self.milestone)
        self.assertIsNone(response)

    def test_get_course_milestones_returns_none_when_app_disabled(self):
        response = milestones_helpers.get_course_milestones(unicode(self.course.id))
        self.assertEqual(len(response), 0)

    def test_add_course_content_milestone_returns_none_when_app_disabled(self):
        response = milestones_helpers.add_course_content_milestone(
            unicode(self.course.id),
            'i4x://any/content/id',
            'requires',
            self.milestone
        )
        self.assertIsNone(response)

    def test_get_course_content_milestones_returns_none_when_app_disabled(self):
        response = milestones_helpers.get_course_content_milestones(
            unicode(self.course.id),
            'i4x://doesnt/matter/for/this/test',
            'requires'
        )
        self.assertEqual(len(response), 0)

    def test_remove_content_references_returns_none_when_app_disabled(self):
        response = milestones_helpers.remove_content_references("i4x://any/content/id/will/do")
        self.assertIsNone(response)

    def test_get_namespace_choices_returns_values_when_app_disabled(self):
        # Namespace choices are static and available even with the app off.
        response = milestones_helpers.get_namespace_choices()
        self.assertIn('ENTRANCE_EXAM', response)

    def test_get_course_milestones_fulfillment_paths_returns_none_when_app_disabled(self):
        response = milestones_helpers.get_course_milestones_fulfillment_paths(unicode(self.course.id), self.user)
        self.assertIsNone(response)

    def test_add_user_milestone_returns_none_when_app_disabled(self):
        response = milestones_helpers.add_user_milestone(self.user, self.milestone)
        self.assertIsNone(response)

    # Re-enable the feature flag for this test only, to exercise validation.
    @patch.dict('django.conf.settings.FEATURES', {'MILESTONES_APP': True})
    def test_any_unfulfilled_milestones(self):
        """ Tests any_unfulfilled_milestones for invalid arguments """
        with self.assertRaises(InvalidCourseKeyException):
            milestones_helpers.any_unfulfilled_milestones(None, self.user)
        with self.assertRaises(InvalidUserException):
            milestones_helpers.any_unfulfilled_milestones(self.course.id, None)
| agpl-3.0 |
402231466/cda-0512 | static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_functiontestcase.py | 791 | 5478 | import unittest
from .support import LoggingResult
class Test_FunctionTestCase(unittest.TestCase):
# "Return the number of tests represented by the this test object. For
# TestCase instances, this will always be 1"
def test_countTestCases(self):
test = unittest.FunctionTestCase(lambda: None)
self.assertEqual(test.countTestCases(), 1)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if setUp() raises
# an exception.
def test_run_call_order__error_in_setUp(self):
events = []
result = LoggingResult(events)
def setUp():
events.append('setUp')
raise RuntimeError('raised by setUp')
def test():
events.append('test')
def tearDown():
events.append('tearDown')
expected = ['startTest', 'setUp', 'addError', 'stopTest']
unittest.FunctionTestCase(test, setUp, tearDown).run(result)
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test raises
# an error (as opposed to a failure).
def test_run_call_order__error_in_test(self):
events = []
result = LoggingResult(events)
def setUp():
events.append('setUp')
def test():
events.append('test')
raise RuntimeError('raised by test')
def tearDown():
events.append('tearDown')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addError', 'stopTest']
unittest.FunctionTestCase(test, setUp, tearDown).run(result)
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test signals
# a failure (as opposed to an error).
def test_run_call_order__failure_in_test(self):
events = []
result = LoggingResult(events)
def setUp():
events.append('setUp')
def test():
events.append('test')
self.fail('raised by test')
def tearDown():
events.append('tearDown')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addFailure', 'stopTest']
unittest.FunctionTestCase(test, setUp, tearDown).run(result)
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if tearDown() raises
# an exception.
def test_run_call_order__error_in_tearDown(self):
    """The call order is preserved even when tearDown() itself raises;
    the runner records the tearDown exception as an error."""
    calls = []
    result = LoggingResult(calls)

    def setUp():
        calls.append('setUp')

    def test():
        calls.append('test')

    def tearDown():
        calls.append('tearDown')
        raise RuntimeError('raised by tearDown')

    unittest.FunctionTestCase(test, setUp, tearDown).run(result)
    self.assertEqual(calls,
                     ['startTest', 'setUp', 'test', 'tearDown',
                      'addError', 'stopTest'])
# "Return a string identifying the specific test case."
#
# Because of the vague nature of the docs, I'm not going to lock this
# test down too much. Really all that can be asserted is that the id()
# will be a string (either 8-byte or unicode -- again, because the docs
# just say "string")
def test_id(self):
test = unittest.FunctionTestCase(lambda: None)
self.assertIsInstance(test.id(), str)
# "Returns a one-line description of the test, or None if no description
# has been provided. The default implementation of this method returns
# the first line of the test method's docstring, if available, or None."
def test_shortDescription__no_docstring(self):
test = unittest.FunctionTestCase(lambda: None)
self.assertEqual(test.shortDescription(), None)
# "Returns a one-line description of the test, or None if no description
# has been provided. The default implementation of this method returns
# the first line of the test method's docstring, if available, or None."
def test_shortDescription__singleline_docstring(self):
desc = "this tests foo"
test = unittest.FunctionTestCase(lambda: None, description=desc)
self.assertEqual(test.shortDescription(), "this tests foo")
| agpl-3.0 |
raychorn/knowu | django/djangononrelsample2/users/templatetags/smart_if.py | 8 | 13247 | """
A smarter {% if %} tag for django templates.
While retaining current Django functionality, it also handles equality,
greater than and less than operators. Some common case examples::
{% if articles|length >= 5 %}...{% endif %}
{% if "ifnotequal tag" != "beautiful" %}...{% endif %}
"""
import unittest
from django import template
register = template.Library()
#==============================================================================
# Calculation objects
#==============================================================================
class BaseCalc(object):
    """Base class for a template-condition calculation.

    ``var1`` and ``var2`` are template-variable-like objects exposing
    ``resolve(context)``; ``var2`` is optional (unary use, e.g. wrapping a
    single negated variable).  ``negate`` inverts the outcome.
    """

    def __init__(self, var1, var2=None, negate=False):
        self.var1 = var1
        self.var2 = var2
        self.negate = negate

    def resolve(self, context):
        """Resolve both variables and apply ``calculate``; any failure
        simply makes the condition false (Django template semantics)."""
        try:
            var1, var2 = self.resolve_vars(context)
            outcome = self.calculate(var1, var2)
        except Exception:
            # BUG FIX: was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit; Exception keeps the silent
            # failed-lookup-is-false behaviour while letting those propagate.
            outcome = False
        if self.negate:
            return not outcome
        return outcome

    def resolve_vars(self, context):
        # var2 is optional; ``self.var2 and ...`` leaves it as None/falsy
        # for unary calculations.
        var2 = self.var2 and self.var2.resolve(context)
        return self.var1.resolve(context), var2

    def calculate(self, var1, var2):
        raise NotImplementedError()
class Or(BaseCalc):
    # Logical "or"; also used to wrap a single (possibly negated) variable.
    def calculate(self, var1, var2):
        return var1 or var2


class And(BaseCalc):
    # Logical "and".
    def calculate(self, var1, var2):
        return var1 and var2


class Equals(BaseCalc):
    # "=" / "==" (the negated form implements "!=").
    def calculate(self, var1, var2):
        return var1 == var2


class Greater(BaseCalc):
    # ">" (the negated form implements "<=").
    def calculate(self, var1, var2):
        return var1 > var2


class GreaterOrEqual(BaseCalc):
    # ">=" (the negated form implements "<").
    def calculate(self, var1, var2):
        return var1 >= var2


class In(BaseCalc):
    # "in" membership test.
    def calculate(self, var1, var2):
        return var1 in var2
#==============================================================================
# Tests
#==============================================================================
class TestVar(object):
    """
    A basic self-resolvable object similar to a Django template variable. Used
    to assist with tests.
    """
    def __init__(self, value):
        self.value = value

    def resolve(self, context):
        # The context is ignored; a TestVar always resolves to its value.
        return self.value
class SmartIfTests(unittest.TestCase):
    """Unit tests for the calculation classes and the IfParser.

    Uses ``assertTrue`` throughout: ``assert_`` is a deprecated alias of
    ``assertTrue`` in the unittest module.
    """

    def setUp(self):
        self.true = TestVar(True)
        self.false = TestVar(False)
        self.high = TestVar(9000)
        self.low = TestVar(1)

    def assertCalc(self, calc, context=None):
        """
        Test a calculation is True, also checking the inverse "negate" case.
        """
        context = context or {}
        self.assertTrue(calc.resolve(context))
        calc.negate = not calc.negate
        self.assertFalse(calc.resolve(context))

    def assertCalcFalse(self, calc, context=None):
        """
        Test a calculation is False, also checking the inverse "negate" case.
        """
        context = context or {}
        self.assertFalse(calc.resolve(context))
        calc.negate = not calc.negate
        self.assertTrue(calc.resolve(context))

    def test_or(self):
        self.assertCalc(Or(self.true))
        self.assertCalcFalse(Or(self.false))
        self.assertCalc(Or(self.true, self.true))
        self.assertCalc(Or(self.true, self.false))
        self.assertCalc(Or(self.false, self.true))
        self.assertCalcFalse(Or(self.false, self.false))

    def test_and(self):
        self.assertCalc(And(self.true, self.true))
        self.assertCalcFalse(And(self.true, self.false))
        self.assertCalcFalse(And(self.false, self.true))
        self.assertCalcFalse(And(self.false, self.false))

    def test_equals(self):
        self.assertCalc(Equals(self.low, self.low))
        self.assertCalcFalse(Equals(self.low, self.high))

    def test_greater(self):
        self.assertCalc(Greater(self.high, self.low))
        self.assertCalcFalse(Greater(self.low, self.low))
        self.assertCalcFalse(Greater(self.low, self.high))

    def test_greater_or_equal(self):
        self.assertCalc(GreaterOrEqual(self.high, self.low))
        self.assertCalc(GreaterOrEqual(self.low, self.low))
        self.assertCalcFalse(GreaterOrEqual(self.low, self.high))

    def test_in(self):
        list_ = TestVar([1, 2, 3])
        invalid_list = TestVar(None)
        self.assertCalc(In(self.low, list_))
        # Membership against None raises inside calculate(), which the
        # resolve() machinery turns into False.
        self.assertCalcFalse(In(self.low, invalid_list))

    def test_parse_bits(self):
        # Exercises the token-list parser directly.
        var = IfParser([True]).parse()
        self.assertTrue(var.resolve({}))
        var = IfParser([False]).parse()
        self.assertFalse(var.resolve({}))

        var = IfParser([False, 'or', True]).parse()
        self.assertTrue(var.resolve({}))

        var = IfParser([False, 'and', True]).parse()
        self.assertFalse(var.resolve({}))

        var = IfParser(['not', False, 'and', 'not', False]).parse()
        self.assertTrue(var.resolve({}))

        var = IfParser(['not', 'not', True]).parse()
        self.assertTrue(var.resolve({}))

        var = IfParser([1, '=', 1]).parse()
        self.assertTrue(var.resolve({}))

        var = IfParser([1, 'not', '=', 1]).parse()
        self.assertFalse(var.resolve({}))

        var = IfParser([1, 'not', 'not', '=', 1]).parse()
        self.assertTrue(var.resolve({}))

        var = IfParser([1, '!=', 1]).parse()
        self.assertFalse(var.resolve({}))

        var = IfParser([3, '>', 2]).parse()
        self.assertTrue(var.resolve({}))

        var = IfParser([1, '<', 2]).parse()
        self.assertTrue(var.resolve({}))

        var = IfParser([2, 'not', 'in', [2, 3]]).parse()
        self.assertFalse(var.resolve({}))

        var = IfParser([1, 'or', 1, '=', 2]).parse()
        self.assertTrue(var.resolve({}))

    def test_boolean(self):
        # Boolean operators chain left-to-right with equal precedence.
        var = IfParser([True, 'and', True, 'and', True]).parse()
        self.assertTrue(var.resolve({}))
        var = IfParser([False, 'or', False, 'or', True]).parse()
        self.assertTrue(var.resolve({}))
        var = IfParser([True, 'and', False, 'or', True]).parse()
        self.assertTrue(var.resolve({}))
        var = IfParser([False, 'or', True, 'and', True]).parse()
        self.assertTrue(var.resolve({}))

        var = IfParser([True, 'and', True, 'and', False]).parse()
        self.assertFalse(var.resolve({}))
        var = IfParser([False, 'or', False, 'or', False]).parse()
        self.assertFalse(var.resolve({}))
        var = IfParser([False, 'or', True, 'and', False]).parse()
        self.assertFalse(var.resolve({}))
        var = IfParser([False, 'and', True, 'or', False]).parse()
        self.assertFalse(var.resolve({}))

    def test_invalid(self):
        # Malformed token streams must raise the parser's error class.
        self.assertRaises(ValueError, IfParser(['not']).parse)
        self.assertRaises(ValueError, IfParser(['==']).parse)
        self.assertRaises(ValueError, IfParser([1, 'in']).parse)
        self.assertRaises(ValueError, IfParser([1, '>', 'in']).parse)
        self.assertRaises(ValueError, IfParser([1, '==', 'not', 'not']).parse)
        self.assertRaises(ValueError, IfParser([1, 2]).parse)
# Maps operator token -> (calculation class, truth).  When truth is False the
# operator is implemented as the negation of the listed class (e.g. "!=" is a
# negated Equals, "<" a negated GreaterOrEqual).
OPERATORS = {
    '=': (Equals, True),
    '==': (Equals, True),
    '!=': (Equals, False),
    '>': (Greater, True),
    '>=': (GreaterOrEqual, True),
    '<=': (Greater, False),
    '<': (GreaterOrEqual, False),
    'or': (Or, True),
    'and': (And, True),
    'in': (In, True),
}

# Operators with the lowest precedence; everything else binds tighter.
BOOL_OPERATORS = ('or', 'and')
class IfParser(object):
    """Parses a flat token list into a tree of BaseCalc nodes, handling
    ``not`` prefixes and giving boolean operators lower precedence than
    comparison operators."""

    error_class = ValueError

    def __init__(self, tokens):
        self.tokens = tokens

    def _get_tokens(self):
        return self._tokens

    def _set_tokens(self, tokens):
        # Cache the length and reset the cursor whenever the list changes.
        self._tokens = tokens
        self.len = len(tokens)
        self.pos = 0

    # Assigning to ``tokens`` goes through _set_tokens above.
    tokens = property(_get_tokens, _set_tokens)

    def parse(self):
        """Consume all tokens and return the root calculation node."""
        if self.at_end():
            raise self.error_class('No variables provided.')
        var1 = self.get_bool_var()
        while not self.at_end():
            # Remaining tokens can only be boolean operators joining terms.
            op, negate = self.get_operator()
            var2 = self.get_bool_var()
            var1 = op(var1, var2, negate=negate)
        return var1

    def get_token(self, eof_message=None, lookahead=False):
        """Return ``(token, negate)`` for the next non-"not" token.

        ``negate`` toggles once per consumed token, so it ends up True
        exactly when an odd number of ``not`` prefixes preceded the token.
        With ``lookahead`` the cursor is left untouched.
        """
        negate = True
        token = None
        pos = self.pos
        while token is None or token == 'not':
            if pos >= self.len:
                if eof_message is None:
                    raise self.error_class()
                raise self.error_class(eof_message)
            token = self.tokens[pos]
            negate = not negate
            pos += 1
        if not lookahead:
            self.pos = pos
        return token, negate

    def at_end(self):
        return self.pos >= self.len

    def create_var(self, value):
        # Overridden by TemplateIfParser to build real template variables.
        return TestVar(value)

    def get_bool_var(self):
        """
        Returns either a variable by itself or a non-boolean operation (such as
        ``x == 0`` or ``x < 0``).

        This is needed to keep correct precedence for boolean operations (i.e.
        ``x or x == 0`` should be ``x or (x == 0)``, not ``(x or x) == 0``).
        """
        var = self.get_var()
        if not self.at_end():
            op_token = self.get_token(lookahead=True)[0]
            if isinstance(op_token, basestring) and (op_token not in
                                                     BOOL_OPERATORS):
                op, negate = self.get_operator()
                return op(var, self.get_var(), negate=negate)
        return var

    def get_var(self):
        """Consume and wrap the next variable token; raises if an operator
        appears where a variable was expected."""
        token, negate = self.get_token('Reached end of statement, still '
                                       'expecting a variable.')
        if isinstance(token, basestring) and token in OPERATORS:
            raise self.error_class('Expected variable, got operator (%s).' %
                                   token)
        var = self.create_var(token)
        if negate:
            # A lone negated variable is modelled as a negated unary Or.
            return Or(var, negate=True)
        return var

    def get_operator(self):
        """Consume the next operator token, returning its calculation class
        and the combined negation flag."""
        token, negate = self.get_token('Reached end of statement, still '
                                       'expecting an operator.')
        if not isinstance(token, basestring) or token not in OPERATORS:
            raise self.error_class('%s is not a valid operator.' % token)
        if self.at_end():
            raise self.error_class('No variable provided after "%s".' % token)
        op, true = OPERATORS[token]
        if not true:
            # Operators registered with False (e.g. "!=", "<") reuse the
            # positive implementation with the result inverted.
            negate = not negate
        return op, negate
#==============================================================================
# Actual templatetag code.
#==============================================================================
class TemplateIfParser(IfParser):
    """IfParser variant that resolves tokens through Django's template
    parser, producing real FilterExpression variables."""

    error_class = template.TemplateSyntaxError

    def __init__(self, parser, *args, **kwargs):
        self.template_parser = parser
        return super(TemplateIfParser, self).__init__(*args, **kwargs)

    def create_var(self, value):
        # Compile "var|filter" style tokens into resolvable expressions.
        return self.template_parser.compile_filter(value)
class SmartIfNode(template.Node):
    """Template node that renders one of two nodelists depending on the
    truth of the parsed condition ``var``."""

    def __init__(self, var, nodelist_true, nodelist_false=None):
        self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
        self.var = var

    def render(self, context):
        if self.var.resolve(context):
            return self.nodelist_true.render(context)
        if self.nodelist_false:
            return self.nodelist_false.render(context)
        return ''

    def __repr__(self):
        return "<Smart If node>"

    def __iter__(self):
        # Yield the child nodes of both branches, true branch first.
        for node in self.nodelist_true:
            yield node
        if self.nodelist_false:
            for node in self.nodelist_false:
                yield node

    def get_nodes_by_type(self, nodetype):
        # Collect this node and matching descendants from both branches.
        nodes = []
        if isinstance(self, nodetype):
            nodes.append(self)
        nodes.extend(self.nodelist_true.get_nodes_by_type(nodetype))
        if self.nodelist_false:
            nodes.extend(self.nodelist_false.get_nodes_by_type(nodetype))
        return nodes
@register.tag('if')
def smart_if(parser, token):
    """
    A smarter {% if %} tag for django templates.

    While retaining current Django functionality, it also handles equality,
    greater than and less than operators. Some common case examples::

        {% if articles|length >= 5 %}...{% endif %}
        {% if "ifnotequal tag" != "beautiful" %}...{% endif %}

    Arguments and operators _must_ have a space between them, so
    ``{% if 1>2 %}`` is not a valid smart if tag.

    All supported operators are: ``or``, ``and``, ``in``, ``=`` (or ``==``),
    ``!=``, ``>``, ``>=``, ``<`` and ``<=``.
    """
    # Everything after the tag name is the condition to parse.
    bits = token.split_contents()[1:]
    condition = TemplateIfParser(parser, bits).parse()

    # Collect the true branch, then look for an optional {% else %}.
    true_nodes = parser.parse(('else', 'endif'))
    end_token = parser.next_token()
    false_nodes = None
    if end_token.contents == 'else':
        false_nodes = parser.parse(('endif',))
        parser.delete_first_token()
    return SmartIfNode(condition, true_nodes, false_nodes)
# Allow running this module directly to execute its unit tests.
if __name__ == '__main__':
    unittest.main()
| lgpl-3.0 |
yangchandle/django_ecommerce | env/lib/python3.5/site-packages/pip/basecommand.py | 92 | 11429 | """Base Command class, and related routines"""
from __future__ import absolute_import
import logging
import os
import sys
import optparse
import warnings
from pip import cmdoptions
from pip.index import PackageFinder
from pip.locations import running_under_virtualenv
from pip.download import PipSession
from pip.exceptions import (BadCommand, InstallationError, UninstallationError,
CommandError, PreviousBuildDirError)
from pip.compat import logging_dictConfig
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.req import InstallRequirement, parse_requirements
from pip.status_codes import (
SUCCESS, ERROR, UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND,
PREVIOUS_BUILD_DIR_ERROR,
)
from pip.utils import deprecation, get_prog, normalize_path
from pip.utils.logging import IndentingFormatter
from pip.utils.outdated import pip_version_check
__all__ = ['Command']
logger = logging.getLogger(__name__)
class Command(object):
    """Base class for pip commands.

    Builds the option parser, configures logging from the verbosity
    options, and wraps ``self.run()`` with the shared error handling and
    exit-status mapping.
    """

    name = None
    usage = None
    hidden = False
    # stdout for normal output, stderr for warnings and errors.
    log_streams = ("ext://sys.stdout", "ext://sys.stderr")

    def __init__(self, isolated=False):
        parser_kw = {
            'usage': self.usage,
            'prog': '%s %s' % (get_prog(), self.name),
            'formatter': UpdatingDefaultsHelpFormatter(),
            'add_help_option': False,
            'name': self.name,
            'description': self.__doc__,
            'isolated': isolated,
        }

        self.parser = ConfigOptionParser(**parser_kw)

        # Commands should add options to this option group
        optgroup_name = '%s Options' % self.name.capitalize()
        self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)

        # Add the general options
        gen_opts = cmdoptions.make_option_group(
            cmdoptions.general_group,
            self.parser,
        )
        self.parser.add_option_group(gen_opts)

    def _build_session(self, options, retries=None, timeout=None):
        """Create a PipSession from the parsed options.

        Explicit ``retries``/``timeout`` arguments override the values
        from ``options`` when given.
        """
        session = PipSession(
            cache=(
                normalize_path(os.path.join(options.cache_dir, "http"))
                if options.cache_dir else None
            ),
            retries=retries if retries is not None else options.retries,
            insecure_hosts=options.trusted_hosts,
        )

        # Handle custom ca-bundles from the user
        if options.cert:
            session.verify = options.cert

        # Handle SSL client certificate
        if options.client_cert:
            session.cert = options.client_cert

        # Handle timeouts
        if options.timeout or timeout:
            session.timeout = (
                timeout if timeout is not None else options.timeout
            )

        # Handle configured proxies
        if options.proxy:
            session.proxies = {
                "http": options.proxy,
                "https": options.proxy,
            }

        # Determine if we can prompt the user for authentication or not
        session.auth.prompting = not options.no_input

        return session

    def parse_args(self, args):
        # factored out for testability
        return self.parser.parse_args(args)

    def main(self, args):
        """Parse args, configure logging, run the command, and translate
        exceptions into exit statuses."""
        options, args = self.parse_args(args)

        if options.quiet:
            # BUG FIX: the second test was a plain ``if`` followed by
            # ``else``, so a single -q fell through to CRITICAL instead of
            # WARNING.  ``elif`` restores -q=WARNING, -qq=ERROR,
            # -qqq+=CRITICAL.
            if options.quiet == 1:
                level = "WARNING"
            elif options.quiet == 2:
                level = "ERROR"
            else:
                level = "CRITICAL"
        elif options.verbose:
            level = "DEBUG"
        else:
            level = "INFO"

        logging_dictConfig({
            "version": 1,
            "disable_existing_loggers": False,
            "filters": {
                "exclude_warnings": {
                    "()": "pip.utils.logging.MaxLevelFilter",
                    "level": logging.WARNING,
                },
            },
            "formatters": {
                "indent": {
                    "()": IndentingFormatter,
                    "format": "%(message)s",
                },
            },
            "handlers": {
                # Below-WARNING records go to stdout, the rest to stderr.
                "console": {
                    "level": level,
                    "class": "pip.utils.logging.ColorizedStreamHandler",
                    "stream": self.log_streams[0],
                    "filters": ["exclude_warnings"],
                    "formatter": "indent",
                },
                "console_errors": {
                    "level": "WARNING",
                    "class": "pip.utils.logging.ColorizedStreamHandler",
                    "stream": self.log_streams[1],
                    "formatter": "indent",
                },
                "user_log": {
                    "level": "DEBUG",
                    "class": "pip.utils.logging.BetterRotatingFileHandler",
                    "filename": options.log or "/dev/null",
                    "delay": True,
                    "formatter": "indent",
                },
            },
            "root": {
                "level": level,
                "handlers": list(filter(None, [
                    "console",
                    "console_errors",
                    "user_log" if options.log else None,
                ])),
            },
            # Disable any logging besides WARNING unless we have DEBUG level
            # logging enabled. These use both pip._vendor and the bare names
            # for the case where someone unbundles our libraries.
            "loggers": dict(
                (
                    name,
                    {
                        "level": (
                            "WARNING"
                            if level in ["INFO", "ERROR"]
                            else "DEBUG"
                        ),
                    },
                )
                for name in ["pip._vendor", "distlib", "requests", "urllib3"]
            ),
        })

        if sys.version_info[:2] == (2, 6):
            warnings.warn(
                "Python 2.6 is no longer supported by the Python core team, "
                "please upgrade your Python. A future version of pip will "
                "drop support for Python 2.6",
                deprecation.Python26DeprecationWarning
            )

        # TODO: try to get these passing down from the command?
        # without resorting to os.environ to hold these.

        if options.no_input:
            os.environ['PIP_NO_INPUT'] = '1'

        if options.exists_action:
            os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action)

        if options.require_venv:
            # If a venv is required check if it can really be found
            if not running_under_virtualenv():
                logger.critical(
                    'Could not find an activated virtualenv (required).'
                )
                sys.exit(VIRTUALENV_NOT_FOUND)

        try:
            status = self.run(options, args)
            # FIXME: all commands should return an exit status
            # and when it is done, isinstance is not needed anymore
            if isinstance(status, int):
                return status
        except PreviousBuildDirError as exc:
            logger.critical(str(exc))
            logger.debug('Exception information:', exc_info=True)

            return PREVIOUS_BUILD_DIR_ERROR
        except (InstallationError, UninstallationError, BadCommand) as exc:
            logger.critical(str(exc))
            logger.debug('Exception information:', exc_info=True)

            return ERROR
        except CommandError as exc:
            logger.critical('ERROR: %s', exc)
            logger.debug('Exception information:', exc_info=True)

            return ERROR
        except KeyboardInterrupt:
            logger.critical('Operation cancelled by user')
            logger.debug('Exception information:', exc_info=True)

            return ERROR
        except:
            # Intentionally broad last-resort handler: log the crash before
            # exiting with UNKNOWN_ERROR.
            logger.critical('Exception:', exc_info=True)

            return UNKNOWN_ERROR
        finally:
            # Check if we're using the latest version of pip available
            if (not options.disable_pip_version_check and not
                    getattr(options, "no_index", False)):
                with self._build_session(
                        options,
                        retries=0,
                        timeout=min(5, options.timeout)) as session:
                    pip_version_check(session)

        return SUCCESS
class RequirementCommand(Command):
    """Shared behaviour for commands that accept requirement arguments
    (e.g. install/download/wheel): filling a RequirementSet and building
    a PackageFinder from the index options."""

    @staticmethod
    def populate_requirement_set(requirement_set, args, options, finder,
                                 session, name, wheel_cache):
        """
        Marshal cmd line args into a requirement set.
        """
        # Constraint files (-c) limit versions without requesting installs.
        for filename in options.constraints:
            for req in parse_requirements(
                    filename,
                    constraint=True, finder=finder, options=options,
                    session=session, wheel_cache=wheel_cache):
                requirement_set.add_requirement(req)

        # Requirements given directly on the command line.
        for req in args:
            requirement_set.add_requirement(
                InstallRequirement.from_line(
                    req, None, isolated=options.isolated_mode,
                    wheel_cache=wheel_cache
                )
            )

        # Editable (-e) requirements.
        for req in options.editables:
            requirement_set.add_requirement(
                InstallRequirement.from_editable(
                    req,
                    default_vcs=options.default_vcs,
                    isolated=options.isolated_mode,
                    wheel_cache=wheel_cache
                )
            )

        # Requirements files (-r); remember whether any yielded something so
        # the "no requirements given" warning below stays accurate.
        found_req_in_file = False
        for filename in options.requirements:
            for req in parse_requirements(
                    filename,
                    finder=finder, options=options, session=session,
                    wheel_cache=wheel_cache):
                found_req_in_file = True
                requirement_set.add_requirement(req)
        # If --require-hashes was a line in a requirements file, tell
        # RequirementSet about it:
        requirement_set.require_hashes = options.require_hashes

        if not (args or options.editables or found_req_in_file):
            opts = {'name': name}
            if options.find_links:
                msg = ('You must give at least one requirement to '
                       '%(name)s (maybe you meant "pip %(name)s '
                       '%(links)s"?)' %
                       dict(opts, links=' '.join(options.find_links)))
            else:
                msg = ('You must give at least one requirement '
                       'to %(name)s (see "pip help %(name)s")' % opts)
            logger.warning(msg)

    def _build_package_finder(self, options, session):
        """
        Create a package finder appropriate to this requirement command.
        """
        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index:
            logger.info('Ignoring indexes: %s', ','.join(index_urls))
            index_urls = []

        return PackageFinder(
            find_links=options.find_links,
            format_control=options.format_control,
            index_urls=index_urls,
            trusted_hosts=options.trusted_hosts,
            allow_all_prereleases=options.pre,
            process_dependency_links=options.process_dependency_links,
            session=session,
        )
| mit |
Ulauncher/Ulauncher | ulauncher/utils/AutostartPreference.py | 1 | 2421 | import os
from xdg.BaseDirectory import xdg_config_home
from ulauncher.search.apps.AppDb import AppDb
from ulauncher.utils.desktop.DesktopParser import DesktopParser
class AutostartPreference:
    """Manages Ulauncher's autostart desktop entry in
    ``~/.config/autostart/ulauncher.desktop``."""

    AUTOSTART_FLAG = 'X-GNOME-Autostart-enabled'

    _ulauncher_desktop = None  # path to ulauncher.desktop
    _ulauncher_autostart_desktop = None  # path ~/.config/autostart/ulauncher.desktop

    def __init__(self):
        self._ulauncher_desktop = self._get_app_desktop()
        self._ulauncher_autostart_desktop = os.path.join(xdg_config_home, 'autostart', 'ulauncher.desktop')

    def _get_app_desktop(self):
        """
        :rtype: str
        :returns: path to desktop file, or None if Ulauncher is not in the app DB
        """
        record = AppDb.get_instance().get_by_name('Ulauncher')
        if record:
            return record['desktop_file']

        return None

    def _get_autostart_parser(self):
        """
        Read ulauncher.desktop
        """
        return DesktopParser(self._ulauncher_autostart_desktop)

    def is_allowed(self):
        """
        :returns: True if autostart is allowed for Ulauncher
        """
        return bool(self._ulauncher_desktop)

    def is_on(self):
        """
        :returns: True if Ulauncher starts automatically
        """
        try:
            return self._get_autostart_parser().get_boolean(self.AUTOSTART_FLAG)
        # pylint: disable=broad-except
        except Exception:
            # Missing or malformed autostart file means "off".
            return False

    def switch(self, is_on):
        """
        if `is_on` is True, set `X-GNOME-Autostart-enabled=true` and
        write file to `~/.config/autostart/ulauncher.desktop`

        :param bool is_on:
        :raises SwitchError: if something goes wrong
        """
        if not self.is_allowed():
            raise SwitchError('Autostart is not allowed')

        try:
            try:
                autostart_info = self._get_autostart_parser()
            except IOError:
                # No autostart file yet: seed it from the installed desktop file.
                autostart_info = DesktopParser(self._ulauncher_desktop)
                autostart_info.set_filename(self._ulauncher_autostart_desktop)
            autostart_info.set(self.AUTOSTART_FLAG, str(bool(is_on)).lower())
            exec_cmd = autostart_info.get('Exec')
            # BUG FIX: previously '--hide-window' was appended unconditionally,
            # so every switch() call grew the Exec line by another flag.
            if '--hide-window' not in exec_cmd:
                autostart_info.set('Exec', '%s %s' % (exec_cmd, '--hide-window'))
            autostart_info.write()
        except Exception as e:
            raise SwitchError('Unexpected exception: %s' % e)
class SwitchError(RuntimeError):
    # Raised by AutostartPreference.switch() when toggling autostart fails.
    pass
| gpl-3.0 |
nickicejones/ENGAGE | ENGAGE2.0/ENGAGE2.0/Raw data processing/3.rawdataprocessing.py | 1 | 4869 | ##### Description of this python file #####
# This is the start location for preprocessing script for the data preparation prior to running the model
##### VARIABLES - Used in this file#####
#
#---------------------------------------------------------------------#
##### START OF CODE #####
### Import statements - Python ###
import arcpy
import numpy as np
import grainsize_lookup
from arcpy.sa import *
### Import Script Files NJ created ###
import DTM_prep
import catchment_prep
import BNG_check
import define_extents
import landcover_prep
import soil_hydro_prep
import grain_size_proportion
import soil_depth_prep
import orgC_prep
import delete_unused_array
### ENVIRONMENT SETTINGS ###
# Overwrite pre-existing files
arcpy.env.overwriteOutput = True

# Check out extensions
arcpy.CheckOutExtension("Spatial")

### GET INPUT PARAMETERS ###
# set environmental workspace
arcpy.env.workspace = arcpy.GetParameterAsText(0)

# Users will have to provide a rivere catchment boundry
river_catchment = arcpy.GetParameterAsText(1)

# Check if MODEL_river_catchment exists as this is needed in the model start of the script.
if arcpy.Exists("MODEL_river_catchment"):
    # NOTE(review): intentional no-op - when a model catchment already exists
    # the user-supplied path is used as-is; only the first run copies it.
    river_catchment = river_catchment
else:
    arcpy.Copy_management(river_catchment, "MODEL_river_catchment")

# Digital Terrain Model
DTM = arcpy.GetParameterAsText(2)

# Land Cover Data
Land_cover_type = arcpy.GetParameterAsText(3)
land_cover = arcpy.GetParameterAsText(4)
natural_england_SPS = arcpy.GetParameterAsText(5)  # optional
roads = arcpy.GetParameterAsText(6)  # optional

# Soil Data
Soil_type = arcpy.GetParameterAsText(7)
soil = arcpy.GetParameterAsText(8)

# Soil grain size Data
soil_parent_material_50 = arcpy.GetParameterAsText(9)  # shapefile of UK coverage

# Soil depth Data
# Uk soil parent material
advanced_superficial_deposit = arcpy.GetParameterAsText(10)  # raster of superficial deposit depth
soil_parent_material_1 = arcpy.GetParameterAsText(11)

# Topsoil organic carbon layer (optional; '#' means "not supplied" in ArcGIS)
orgC = arcpy.GetParameterAsText(12)

# Q50 exceedance which is used to determine which grainsizes are present in the channel
Q50_exceedance = arcpy.GetParameterAsText(13)

### Start of data preparation ###
# Prepare the DTM.
DTM_fill, DTM_flow_direction, cell_size = DTM_prep.DTM_preparation(DTM)

# Prepare the river catchment for clipping
river_catchment_polygon = catchment_prep.catchment_preparation(river_catchment, cell_size)
# Check if user is using FAO or Corine and if orgC is provided
def check_BNG_needed(Soil_type, Land_cover_type, orgC):
    """Return True when any input dataset requires conversion to British
    National Grid: FAO soils, CORINE 2006 land cover, or a supplied organic
    carbon layer ('#' is ArcGIS's "not supplied" placeholder).

    BUG FIX: the original used three independent ``if`` statements where the
    final ``if/else`` unconditionally overwrote the result, so e.g. FAO soils
    with no orgC layer incorrectly returned False.
    """
    if Soil_type == 'FAO':
        return True
    if Land_cover_type == 'CORINE 2006':
        return True
    return bool(orgC and orgC != '#')
# BNG conversion is currently disabled; the check call is kept for reference.
BNG = False  # check_BNG_needed(Soil_type, Land_cover_type, orgC)

# Check if files need to be converted to BNG
DTM_BNG, soil_BNG, land_cover_BNG, river_catchment_BNG, orgC_BNG = BNG_check.convert_BNG(BNG, DTM_fill, soil, land_cover, orgC, river_catchment_polygon)

# Calculate a buffer catchment and the extents of the catchments
catch_extent, buffer_catchment, buffer_extent = define_extents.calculate_catchment_extents(river_catchment_BNG)

# Clip the DTM and return the cell size and bottom left corner
DTM_clip, DTM_cell_size, bottom_left_corner = DTM_prep.DTM_clip(DTM_BNG, catch_extent, river_catchment_BNG)

# NumPy view of the clipped DTM; -9999 is the NoData value.
DTM_clip_np = arcpy.RasterToNumPyArray("MODEL_DTM", '#', '#', '#', -9999)

# Clip the land cover to the river catchment
land_cover_clipped = landcover_prep.land_cover_clip_analysis(land_cover, Land_cover_type, DTM_clip_np, DTM_cell_size, buffer_catchment, buffer_extent,
                                                            river_catchment_BNG, catch_extent, natural_england_SPS, roads, bottom_left_corner)

# Clip the soil hydrology to the river catchment
soil_clipped = soil_hydro_prep.soil_clip_analysis(soil_BNG, Soil_type, DTM_cell_size, buffer_catchment, buffer_extent, river_catchment_BNG, catch_extent)

# Calculate the distribuiton of grain sizes across the catchment
soil_grain_calculation = grain_size_proportion.grain_size_calculation(soil_parent_material_50, DTM_clip_np, DTM_cell_size, buffer_catchment, buffer_extent, river_catchment_BNG, catch_extent, bottom_left_corner, Q50_exceedance)

# Calculate the distribution of soil depth across the river catchment
soil_depth_calculation = soil_depth_prep.soil_depth_calc(soil_parent_material_1, advanced_superficial_deposit, DTM_clip_np, DTM_cell_size, buffer_catchment, buffer_extent, river_catchment_BNG, catch_extent, bottom_left_corner)

# Calculate the organic carbon in the topsoil
soil_orgC_calculation = orgC_prep.soil_orgC_calc(orgC_BNG, DTM_cell_size, buffer_catchment, buffer_extent, river_catchment_BNG, catch_extent, bottom_left_corner)

# Delete the unused files
delete_unused_array.delete_temp_files()

arcpy.AddMessage("Preprocessing complete")
joram/sickbeard-orange | sickbeard/browser.py | 11 | 4290 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import os
import string
import cherrypy
from sickbeard import encodingKludge as ek
# use the built-in if it's available (python 2.6), if not use the included library
try:
import json
except ImportError:
from lib import simplejson as json
# this is for the drive letter code, it only works on windows
if os.name == 'nt':
from ctypes import windll
# adapted from http://stackoverflow.com/questions/827371/is-there-a-way-to-list-all-the-available-drive-letters-in-python/827490
def getWinDrives():
    """ Return list of detected drive letters (Windows only). """
    assert os.name == 'nt'
    drives = []

    # Each bit of the mask corresponds to one drive letter, A: = bit 0.
    bitmask = windll.kernel32.GetLogicalDrives()  # @UndefinedVariable
    # BUG FIX: string.uppercase is locale-dependent in Python 2 and can
    # contain more than A-Z; drive letters are always ASCII A-Z.
    for letter in string.ascii_uppercase:
        if bitmask & 1:
            drives.append(letter)
        bitmask >>= 1

    return drives
def foldersAtPath(path, includeParent=False):
    """ Returns a list of dictionaries with the folders contained at the given path
        Give the empty string as the path to list the contents of the root path
        (under Unix this means "/", on Windows this will be a list of drive letters)
    """

    # walk up the tree until we find a valid path
    while path and not os.path.isdir(path):
        if path == os.path.dirname(path):
            path = ''
            break
        else:
            path = os.path.dirname(path)

    # filesystem root: list drive letters on Windows, '/' elsewhere
    if path == "":
        if os.name == 'nt':
            entries = [{'current_path': 'Root'}]
            for letter in getWinDrives():
                letterPath = letter + ':\\'
                entries.append({'name': letterPath, 'path': letterPath})
            return entries
        else:
            path = '/'

    # fix up the path and find the parent
    path = os.path.abspath(os.path.normpath(path))
    parentPath = os.path.dirname(path)

    # if we're at the root then the next step is the meta-node showing our drive letters
    if path == parentPath and os.name == 'nt':
        parentPath = ""

    fileList = [{'name': filename, 'path': ek.ek(os.path.join, path, filename)} for filename in ek.ek(os.listdir, path)]
    fileList = filter(lambda entry: ek.ek(os.path.isdir, entry['path']), fileList)

    # prune out directories to protect the user from doing stupid things (already lower case the dir to reduce calls)
    hideList = ["boot", "bootmgr", "cache", "msocache", "recovery", "$recycle.bin", "recycler", "system volume information", "temporary internet files"]  # windows specific
    hideList += [".fseventd", ".spotlight", ".trashes", ".vol", "cachedmessages", "caches", "trash"]  # osx specific
    fileList = filter(lambda entry: entry['name'].lower() not in hideList, fileList)

    # BUG FIX: the comparator used x['name'] against y['path'], giving an
    # inconsistent sort order; compare the basename of 'name' on both sides.
    fileList = sorted(fileList, lambda x, y: cmp(os.path.basename(x['name']).lower(), os.path.basename(y['name']).lower()))

    entries = [{'current_path': path}]
    if includeParent and parentPath != path:
        entries.append({'name': "..", 'path': parentPath})
    entries.extend(fileList)

    return entries
class WebFileBrowser:
    """CherryPy-exposed JSON endpoints backing the web UI's directory picker."""

    @cherrypy.expose
    def index(self, path=''):
        # List the folders at ``path`` (including the parent entry).
        cherrypy.response.headers['Content-Type'] = "application/json"
        return json.dumps(foldersAtPath(path, True))

    @cherrypy.expose
    def complete(self, term):
        # Autocomplete: list sibling directory paths of the typed prefix.
        cherrypy.response.headers['Content-Type'] = "application/json"
        paths = [entry['path'] for entry in foldersAtPath(os.path.dirname(term)) if 'path' in entry]
        return json.dumps(paths)
| gpl-3.0 |
Filechaser/nzbToMedia | libs/yaml/composer.py | 534 | 4921 |
__all__ = ['Composer', 'ComposerError']
from error import MarkedYAMLError
from events import *
from nodes import *
class ComposerError(MarkedYAMLError):
    # Raised for structural problems while composing the node graph, e.g.
    # undefined aliases, duplicate anchors, or extra documents.
    pass
class Composer(object):
def __init__(self):
    # Maps anchor name -> already-composed node, for resolving aliases.
    self.anchors = {}
def check_node(self):
    """Return True if another document (root node) is available."""
    # Drop the STREAM-START event.
    if self.check_event(StreamStartEvent):
        self.get_event()

    # If there are more documents available?
    return not self.check_event(StreamEndEvent)
def get_node(self):
    """Compose and return the root node of the next document, or None at
    the end of the stream."""
    # Get the root node of the next document.
    if not self.check_event(StreamEndEvent):
        return self.compose_document()
def get_single_node(self):
    """Compose the single document of the stream (or None if empty),
    raising ComposerError if the stream holds more than one document."""
    # Drop the STREAM-START event.
    self.get_event()

    # Compose a document if the stream is not empty.
    document = None
    if not self.check_event(StreamEndEvent):
        document = self.compose_document()

    # Ensure that the stream contains no more documents.
    if not self.check_event(StreamEndEvent):
        event = self.get_event()
        raise ComposerError("expected a single document in the stream",
                document.start_mark, "but found another document",
                event.start_mark)

    # Drop the STREAM-END event.
    self.get_event()

    return document
def compose_document(self):
# Drop the DOCUMENT-START event.
self.get_event()
# Compose the root node.
node = self.compose_node(None, None)
# Drop the DOCUMENT-END event.
self.get_event()
self.anchors = {}
return node
def compose_node(self, parent, index):
if self.check_event(AliasEvent):
event = self.get_event()
anchor = event.anchor
if anchor not in self.anchors:
raise ComposerError(None, None, "found undefined alias %r"
% anchor.encode('utf-8'), event.start_mark)
return self.anchors[anchor]
event = self.peek_event()
anchor = event.anchor
if anchor is not None:
if anchor in self.anchors:
raise ComposerError("found duplicate anchor %r; first occurence"
% anchor.encode('utf-8'), self.anchors[anchor].start_mark,
"second occurence", event.start_mark)
self.descend_resolver(parent, index)
if self.check_event(ScalarEvent):
node = self.compose_scalar_node(anchor)
elif self.check_event(SequenceStartEvent):
node = self.compose_sequence_node(anchor)
elif self.check_event(MappingStartEvent):
node = self.compose_mapping_node(anchor)
self.ascend_resolver()
return node
def compose_scalar_node(self, anchor):
event = self.get_event()
tag = event.tag
if tag is None or tag == u'!':
tag = self.resolve(ScalarNode, event.value, event.implicit)
node = ScalarNode(tag, event.value,
event.start_mark, event.end_mark, style=event.style)
if anchor is not None:
self.anchors[anchor] = node
return node
def compose_sequence_node(self, anchor):
start_event = self.get_event()
tag = start_event.tag
if tag is None or tag == u'!':
tag = self.resolve(SequenceNode, None, start_event.implicit)
node = SequenceNode(tag, [],
start_event.start_mark, None,
flow_style=start_event.flow_style)
if anchor is not None:
self.anchors[anchor] = node
index = 0
while not self.check_event(SequenceEndEvent):
node.value.append(self.compose_node(node, index))
index += 1
end_event = self.get_event()
node.end_mark = end_event.end_mark
return node
def compose_mapping_node(self, anchor):
start_event = self.get_event()
tag = start_event.tag
if tag is None or tag == u'!':
tag = self.resolve(MappingNode, None, start_event.implicit)
node = MappingNode(tag, [],
start_event.start_mark, None,
flow_style=start_event.flow_style)
if anchor is not None:
self.anchors[anchor] = node
while not self.check_event(MappingEndEvent):
#key_event = self.peek_event()
item_key = self.compose_node(node, None)
#if item_key in node.value:
# raise ComposerError("while composing a mapping", start_event.start_mark,
# "found duplicate key", key_event.start_mark)
item_value = self.compose_node(node, item_key)
#node.value[item_key] = item_value
node.value.append((item_key, item_value))
end_event = self.get_event()
node.end_mark = end_event.end_mark
return node
| gpl-3.0 |
jiasir/pycs | vulpo/pyami/copybot.py | 1 | 4290 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import vulpo
from vulpo.pyami.scriptbase import ScriptBase
import os, StringIO
class CopyBot(ScriptBase):
    """Pyami script that mirrors every key from a source SCS bucket into a
    destination bucket (optionally under separate credentials), copies ACLs
    when configured, uploads its own log file to the destination, and can
    terminate the instance it runs on when finished.
    """

    def __init__(self):
        super(CopyBot, self).__init__()
        self.wdir = vulpo.config.get('Pyami', 'working_dir')
        self.log_file = '%s.log' % self.instance_id
        self.log_path = os.path.join(self.wdir, self.log_file)
        vulpo.set_file_logger(self.name, self.log_path)
        self.src_name = vulpo.config.get(self.name, 'src_bucket')
        self.dst_name = vulpo.config.get(self.name, 'dst_bucket')
        self.replace = vulpo.config.getbool(self.name, 'replace_dst', True)
        scs = vulpo.connect_scs()
        self.src = scs.lookup(self.src_name)
        if not self.src:
            vulpo.log.error('Source bucket does not exist: %s' % self.src_name)
        # If separate credentials are configured for the destination,
        # reconnect with them before looking up/creating the target bucket.
        dest_access_key = vulpo.config.get(self.name, 'dest_aws_access_key_id', None)
        if dest_access_key:
            dest_secret_key = vulpo.config.get(self.name, 'dest_aws_secret_access_key', None)
            scs = vulpo.connect(dest_access_key, dest_secret_key)
        self.dst = scs.lookup(self.dst_name)
        if not self.dst:
            self.dst = scs.create_bucket(self.dst_name)

    def copy_bucket_acl(self):
        """Copy the bucket-level ACL when 'copy_acls' is enabled (default)."""
        if vulpo.config.get(self.name, 'copy_acls', True):
            acl = self.src.get_xml_acl()
            self.dst.set_xml_acl(acl)

    def copy_key_acl(self, src, dst):
        """Copy a single key's ACL from *src* to *dst* when enabled."""
        if vulpo.config.get(self.name, 'copy_acls', True):
            acl = src.get_xml_acl()
            dst.set_xml_acl(acl)

    def copy_keys(self):
        """Copy every key from src to dst via a local temp file.

        Existing destination keys are skipped unless 'replace_dst' is set.
        """
        vulpo.log.info('src=%s' % self.src.name)
        vulpo.log.info('dst=%s' % self.dst.name)
        # Track the key currently being processed so the failure log can
        # name it even when the iteration itself raises before/without a
        # bound loop variable.
        current_name = None
        try:
            for key in self.src:
                current_name = key.name
                if not self.replace:
                    exists = self.dst.lookup(key.name)
                    if exists:
                        vulpo.log.info('key=%s already exists in %s, skipping' % (key.name, self.dst.name))
                        continue
                vulpo.log.info('copying %d bytes from key=%s' % (key.size, key.name))
                prefix, base = os.path.split(key.name)
                path = os.path.join(self.wdir, base)
                key.get_contents_to_filename(path)
                new_key = self.dst.new_key(key.name)
                new_key.set_contents_from_filename(path)
                self.copy_key_acl(key, new_key)
                os.unlink(path)
        except Exception:
            # Fixed: previously a bare `except:` which also swallowed
            # SystemExit/KeyboardInterrupt, and it logged `key.name` which is
            # unbound when the error occurs before the first key is yielded.
            vulpo.log.exception('Error copying key: %s' % current_name)

    def copy_log(self):
        """Upload this run's log file into the destination bucket."""
        key = self.dst.new_key(self.log_file)
        key.set_contents_from_filename(self.log_path)

    def main(self):
        """Entry point: notify start, copy keys, upload the log, notify
        completion, and optionally terminate this instance
        ('exit_on_completion', default True).
        """
        fp = StringIO.StringIO()
        vulpo.config.dump_safe(fp)
        self.notify('%s (%s) Starting' % (self.name, self.instance_id), fp.getvalue())
        if self.src and self.dst:
            self.copy_keys()
        if self.dst:
            self.copy_log()
        self.notify('%s (%s) Stopping' % (self.name, self.instance_id),
                    'Copy Operation Complete')
        if vulpo.config.getbool(self.name, 'exit_on_completion', True):
            ec2 = vulpo.connect_ec2()
            ec2.terminate_instances([self.instance_id])
| mit |
DucQuang1/BuildingMachineLearningSystemsWithPython | ch06/02_tuning.py | 22 | 5484 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
#
# This script trains tries to tweak hyperparameters to improve P/R AUC
#
import time

# Wall-clock reference point, reported by the final "time spent:" print.
start_time = time.time()
import numpy as np
from sklearn.metrics import precision_recall_curve, roc_curve, auc
from sklearn.cross_validation import ShuffleSplit
from utils import plot_pr
from utils import load_sanders_data
from utils import tweak_labels
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import f1_score
from sklearn.naive_bayes import MultinomialNB
phase = "02"
def create_ngram_model(params=None):
    """Build a TF-IDF (1-3 word ngrams) + MultinomialNB pipeline.

    If *params* is a non-empty mapping, it is applied via set_params
    (keys use the 'vect__*' / 'clf__*' pipeline convention).
    """
    vectorizer = TfidfVectorizer(ngram_range=(1, 3),
                                 analyzer="word", binary=False)
    model = Pipeline([('vect', vectorizer), ('clf', MultinomialNB())])
    if params:
        model.set_params(**params)
    return model
def grid_search_model(clf_factory, X, Y):
    """Exhaustively search vectorizer/classifier hyperparameters.

    Uses 10 ShuffleSplit folds (30% test) and F1 as the selection metric;
    returns the best fitted estimator produced by *clf_factory*.
    """
    cv = ShuffleSplit(
        n=len(X), n_iter=10, test_size=0.3, random_state=0)

    # 3 * 2 * 2 * 2 * 2 * 2 * 2 * 6 = 1152 candidate combinations.
    param_grid = dict(vect__ngram_range=[(1, 1), (1, 2), (1, 3)],
                      vect__min_df=[1, 2],
                      vect__stop_words=[None, "english"],
                      vect__smooth_idf=[False, True],
                      vect__use_idf=[False, True],
                      vect__sublinear_tf=[False, True],
                      vect__binary=[False, True],
                      clf__alpha=[0, 0.01, 0.05, 0.1, 0.5, 1],
                      )

    # NOTE(review): `score_func` is the pre-0.14 scikit-learn spelling;
    # modern versions use `scoring=` instead.
    grid_search = GridSearchCV(clf_factory(),
                               param_grid=param_grid,
                               cv=cv,
                               score_func=f1_score,
                               verbose=10)
    grid_search.fit(X, Y)
    clf = grid_search.best_estimator_
    print(clf)

    return clf
def train_model(clf, X, Y, name="NB ngram", plot=False):
    """Train/evaluate *clf* over 10 shuffle-split folds.

    Prints mean/std of accuracy and P/R AUC, optionally plots the
    median-AUC precision/recall curve, and returns
    (mean train error, mean test error).
    """
    # create it again for plotting
    cv = ShuffleSplit(
        n=len(X), n_iter=10, test_size=0.3, indices=True, random_state=0)

    train_errors = []
    test_errors = []

    scores = []
    pr_scores = []
    precisions, recalls, thresholds = [], [], []

    for train, test in cv:
        X_train, y_train = X[train], Y[train]
        X_test, y_test = X[test], Y[test]

        clf.fit(X_train, y_train)

        train_score = clf.score(X_train, y_train)
        test_score = clf.score(X_test, y_test)

        train_errors.append(1 - train_score)
        test_errors.append(1 - test_score)

        scores.append(test_score)
        proba = clf.predict_proba(X_test)

        # Column 1 of predict_proba = probability of the positive class.
        fpr, tpr, roc_thresholds = roc_curve(y_test, proba[:, 1])
        precision, recall, pr_thresholds = precision_recall_curve(
            y_test, proba[:, 1])

        pr_scores.append(auc(recall, precision))
        precisions.append(precision)
        recalls.append(recall)
        thresholds.append(pr_thresholds)

    if plot:
        # Plot the fold with the median P/R AUC.
        # NOTE(review): `len(...) / 2` relies on Python 2 integer division;
        # under Python 3 this would need `//` to stay a valid index.
        scores_to_sort = pr_scores
        median = np.argsort(scores_to_sort)[len(scores_to_sort) / 2]

        plot_pr(pr_scores[median], name, phase, precisions[median],
                recalls[median], label=name)

    summary = (np.mean(scores), np.std(scores),
               np.mean(pr_scores), np.std(pr_scores))
    print("%.3f\t%.3f\t%.3f\t%.3f\t" % summary)

    return np.mean(train_errors), np.mean(test_errors)
def print_incorrect(clf, X, Y):
    """Print every misclassified sample with its predicted and true label."""
    predictions = clf.predict(X)
    mismatch = predictions != Y
    samples = X[mismatch]
    guesses = predictions[mismatch]
    truths = Y[mismatch]
    for sample, guess, truth in zip(samples, guesses, truths):
        print("clf.predict('%s')=%i instead of %i" %
              (sample, guess, truth))
def get_best_model():
    """Return the ngram pipeline configured with the tuned hyperparameters."""
    tuned_params = {
        'vect__ngram_range': (1, 2),
        'vect__min_df': 1,
        'vect__stop_words': None,
        'vect__smooth_idf': False,
        'vect__use_idf': False,
        'vect__sublinear_tf': True,
        'vect__binary': False,
        'clf__alpha': 0.01,
    }
    return create_ngram_model(tuned_params)
if __name__ == "__main__":
    X_orig, Y_orig = load_sanders_data()
    classes = np.unique(Y_orig)
    # Show the class distribution before training.
    for c in classes:
        print("#%s: %i" % (c, sum(Y_orig == c)))

    print("== Pos vs. neg ==")
    # Restrict to clearly polar tweets, then binarize against "positive".
    pos_neg = np.logical_or(Y_orig == "positive", Y_orig == "negative")
    X = X_orig[pos_neg]
    Y = Y_orig[pos_neg]
    Y = tweak_labels(Y, ["positive"])
    train_model(get_best_model(), X, Y, name="pos vs neg", plot=True)

    print("== Pos/neg vs. irrelevant/neutral ==")
    X = X_orig
    Y = tweak_labels(Y_orig, ["positive", "negative"])

    # best_clf = grid_search_model(create_ngram_model, X, Y, name="sent vs
    # rest", plot=True)
    # NOTE(review): the plot name below still says "pos vs neg" — looks like
    # a copy-paste leftover; confirm before renaming (it changes plot files).
    train_model(get_best_model(), X, Y, name="pos vs neg", plot=True)

    print("== Pos vs. rest ==")
    X = X_orig
    Y = tweak_labels(Y_orig, ["positive"])
    train_model(get_best_model(), X, Y, name="pos vs rest",
                plot=True)

    print("== Neg vs. rest ==")
    X = X_orig
    Y = tweak_labels(Y_orig, ["negative"])
    train_model(get_best_model(), X, Y, name="neg vs rest",
                plot=True)

    print("time spent:", time.time() - start_time)
| mit |
jaggukaka/sentiment-evaluation | compare.py | 1 | 8097 | """Usage:
python compare.py <path to text file with annotated data> <path to config file>
"""
import sys
import os
import codecs
import csv
import logging
from collections import Counter
from bitext import Bitext
from chatterbox import Chatterbox
from datumbox import Datumbox
from lymbix import Lymbix
from repustate import Repustate
from semantria_api import Semantria
from skyttle import Skyttle
from viralheat import Viralheat
from aiapplied import AIApplied
from sentigem import Sentigem
from thr import Thr
# Names of the sentiment analyzers to run; commented-out entries are
# disabled.  The order here also fixes the column order in results.csv.
ANALYZERS_TO_USE = [
    #'skyttle',
    #'chatterbox',
    'datumbox'
    #'repustate',
    #'bitext',
    #'semantria',
    #'viralheat',
    #'lymbix',
    #'aiapplied',
    #'sentigem'
]

# Populated by initialize_analysers() with instantiated analyzer objects.
ANALYZERS = []

# Module logger; assigned by setup_logging().
LOGGER = None
def setup_logging():
    """Log debug or higher to a file, errors to stderr.

    Recreates 'compare.log' on every run and stores the module logger in
    the global LOGGER.
    """
    global LOGGER
    fname = 'compare.log'
    # Start each run with a fresh log file.
    if os.path.exists(fname):
        os.unlink(fname)
    # Renamed from `format`: the old name shadowed the builtin format().
    log_format = '%(levelname)s:%(name)s:%(message)s'
    logging.basicConfig(filename=fname, level=logging.DEBUG, format=log_format)
    LOGGER = logging.getLogger('APICompare')
    # Mirror only errors to the console so progress noise stays in the file.
    streamhandler = logging.StreamHandler()
    streamhandler.setLevel(logging.ERROR)
    streamhandler.setFormatter(logging.Formatter(log_format))
    LOGGER.addHandler(streamhandler)
def read_evaluation_data(fname):
    """Read the gold standard data.

    Each non-empty line is "document<TAB>label"; label 'X' marks a document
    to skip.  Lines without a tab are kept as documents with no label.

    :return doc_id2doc: document id to the text of the document
    :return doc_id2key: document id to the manually assigned sentiment label
    """
    doc_id2key = {}
    doc_id2doc = {}
    doc_id = 0
    for line in codecs.open(fname, 'r', 'utf8'):
        line = line.strip()
        if not line:
            continue
        try:
            document, key = line.split('\t')
            key = key.strip()
            document = document.strip()
        except ValueError:
            # No (single) tab: keep the text but record no label.
            # Fixed: previously `key` kept its value from the PREVIOUS line
            # (or raised NameError on the first line), silently mislabeling
            # such documents.
            document = line
            key = None
        if key == 'X':
            continue
        if key is not None:
            doc_id2key[doc_id] = key
        doc_id2doc[doc_id] = document
        doc_id += 1
    return doc_id2doc, doc_id2key
def read_config(config_fname=None):
    """Load tab-separated API credentials into a dict.

    Falls back to 'config.txt' when no filename is given.
    """
    path = config_fname or 'config.txt'
    pairs = (entry.split('\t') for entry in codecs.open(path, 'r', 'utf8'))
    return dict((name.strip(), value.strip()) for name, value in pairs)
def initialize_analysers(config):
    """Initialise analysers.

    Instantiates every analyzer named in ANALYZERS_TO_USE with its
    credentials from *config* and appends it to the module-level ANALYZERS
    list.  The append order follows this function, so it also fixes the
    results-table column order.
    """
    if 'skyttle' in ANALYZERS_TO_USE:
        skyttle = Skyttle(mashape_auth=config['mashape_auth'],
                          language=config['language'])
        ANALYZERS.append(skyttle)

    if 'chatterbox' in ANALYZERS_TO_USE:
        # Shares the Mashape credentials with Skyttle.
        chatterbox = Chatterbox(mashape_auth=config['mashape_auth'],
                                language=config['language'])
        ANALYZERS.append(chatterbox)

    if 'datumbox' in ANALYZERS_TO_USE:
        datumbox = Datumbox(api_key=config['datumbox_key'])
        ANALYZERS.append(datumbox)

    if 'repustate' in ANALYZERS_TO_USE:
        repustate = Repustate(api_key=config['repustate_key'])
        ANALYZERS.append(repustate)

    if 'bitext' in ANALYZERS_TO_USE:
        bitext = Bitext(user=config['bitext_user'],
                        password=config['bitext_pwd'],
                        language=config['language'])
        ANALYZERS.append(bitext)

    if 'semantria' in ANALYZERS_TO_USE:
        semantria = Semantria(consumer_key=config['semantria_consumer_key'],
                              consumer_secret=config['semantria_consumer_secret'])
        ANALYZERS.append(semantria)

    if 'viralheat' in ANALYZERS_TO_USE:
        viralheat = Viralheat(api_key=config['viralheat_key'])
        ANALYZERS.append(viralheat)

    if 'lymbix' in ANALYZERS_TO_USE:
        lymbix = Lymbix(api_key=config['lymbix_key'])
        ANALYZERS.append(lymbix)

    if 'aiapplied' in ANALYZERS_TO_USE:
        aiapplied = AIApplied(api_key=config['aiapplied_key'],
                              language=config['language'])
        ANALYZERS.append(aiapplied)

    if 'sentigem' in ANALYZERS_TO_USE:
        sentigem = Sentigem(api_key=config['sentigem_key'])
        ANALYZERS.append(sentigem)
def process_one_doc(text, key):
    """Process one document in all analyzers
    :return result_list: a list of outputs for all analyzers
    :return hits: a Counter with hits for all analyzers
    :return errors: a Counter with errors for all analyzers
    """
    # NOTE: `global` is not needed for read-only access; kept as-is.
    global ANALYZERS
    hits = Counter()
    errors = Counter()
    results = {}
    # Thr apparently collects per-analyzer results in class-level dicts
    # keyed by analyzer name; reset them before launching this document's
    # threads (assumes one process_one_doc call at a time — verify in Thr).
    Thr.outputs = {}
    Thr.inputs = {}
    threads = []
    # Query every analyzer concurrently, one thread per API.
    for analyser in ANALYZERS:
        thr = Thr(analyser, [text])
        threads.append(thr)
        thr.start()
    for thr in threads:
        thr.join()
    for name, output in Thr.outputs.items():
        # A falsy first tuple element signals an API failure.
        if isinstance(output, tuple) and not output[0]:
            output = 'Error'
        if output == key:
            hits[name] += 1
        elif output != 'Error':
            # Weighted errors: neutral/polar confusion counts 1,
            # a polarity flip (positive <-> negative) counts 2.
            if key == '0' or output == '0':
                errors[name] += 1
            else:
                errors[name] += 2
        results[name] = output
    # Return outputs in ANALYZERS order so CSV columns line up.
    result_list = [results[x.name] for x in ANALYZERS]
    return result_list, hits, errors
def get_max_weighted_errors(doc_id2key):
    """Determine the maximum possible sum of weighted errors.

    Neutral documents ('0') can contribute at most 1, polar ones at most 2.
    """
    weights = (1 if gs_key == '0' else 2 for gs_key in doc_id2key.values())
    return float(sum(weights))
def evaluate(doc_id2text, doc_id2key):
    """Send evaluation documents to each API, output all results into a table,
    and if doc_id2key are available, output accuracy and error rate.
    """
    total_hits = Counter()
    total_errors = Counter()
    accuracy = Counter()
    error_rate = Counter()
    # Python 2 idiom: csv.writer over a codecs-opened binary file handles
    # the utf8 encoding of rows.
    cvswriter = csv.writer(codecs.open('results.csv', 'wb', 'utf8'), delimiter='\t')
    col_names = ['doc_id', 'text', 'gold standard'] + [x.name for x in ANALYZERS]
    cvswriter.writerow(col_names)
    # Process documents in id order so the CSV is deterministic.
    for doc_id, text in sorted(doc_id2text.items()):
        key = doc_id2key.get(doc_id)
        results, doc_hits, doc_errors = process_one_doc(text, key)
        if doc_hits:
            total_hits += doc_hits
        if doc_errors:
            total_errors += doc_errors
        cvswriter.writerow([doc_id, text, key] + results)
    num_docs = float(len(doc_id2text))
    max_errors = get_max_weighted_errors(doc_id2key)
    # Normalize: accuracy per document, error rate against the worst case.
    for analyzer in ANALYZERS:
        name = analyzer.name
        accuracy[name] = total_hits.get(name, 0.0)/num_docs
        error_rate[name] = total_errors.get(name, 0.0)/max_errors
    return accuracy, error_rate
def main(eval_data_fname, config_fname):
    """Main function

    Wires the pipeline together: logging -> gold data -> config ->
    analyzers -> evaluation, then prints ranked accuracy and error-rate
    tables.  (Python 2: note the print statements below.)
    """
    setup_logging()

    # read test data
    doc_id2text, doc_id2key = read_evaluation_data(eval_data_fname)

    # read config
    config = read_config(config_fname)

    # initialise relevant analysers
    initialize_analysers(config)

    # evaluate
    accuracy, error_rate = evaluate(doc_id2text, doc_id2key)

    # Best accuracy first; lowest error rate first (hence reversed()).
    print "%-15s%s" % ('Analyzer', 'Accuracy')
    for name, score in accuracy.most_common():
        print "%-15s%.3f" % (name, score)
    print
    print "%-15s%s" % ('Analyzer', 'Error rate')
    for name, score in reversed(error_rate.most_common()):
        print "%-15s%.3f" % (name, score)
if __name__ == "__main__":
    # CLI: compare.py <annotated data file> [<config file>]
    eval_data_fname = None
    config_fname = None
    if len(sys.argv) > 1:
        eval_data_fname = sys.argv[1]
        # The config path is optional; read_config falls back to config.txt.
        if len(sys.argv) == 3:
            config_fname = sys.argv[2]
    else:
        raise Exception("Please specify the path to the file with evaluation data")
    main(eval_data_fname, config_fname)
| apache-2.0 |
yangqun/lily2-gem5 | tests/long/se/60.bzip2/test.py | 21 | 1751 | # Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Korey Sewell
# Make the shared config helpers (cpu2000 workload definitions) importable.
m5.util.addToPath('../configs/common')
from cpu2000 import bzip2_source

# `isa`, `opsys` and `root` are injected by the gem5 test harness before this
# script runs; 'lgred' selects the reduced (MinneSPEC) input set.
workload = bzip2_source(isa, opsys, 'lgred')
root.system.cpu.workload = workload.makeLiveProcess()
| bsd-3-clause |
ntt-sic/python-novaclient | novaclient/tests/test_client.py | 2 | 8822 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import requests
import novaclient.client
import novaclient.extension
import novaclient.tests.fakes as fakes
import novaclient.v1_1.client
import novaclient.v3.client
from novaclient.tests import utils
import json
class ClientTest(utils.TestCase):
    """Unit tests for novaclient.client: HTTP behaviour (timeout,
    re-authentication), client-class version selection, caching flags,
    management-URL/timings helpers, and extension loading.
    """

    def test_client_with_timeout(self):
        # The configured timeout must be forwarded to requests on every call.
        instance = novaclient.client.HTTPClient(user='user',
                                                password='password',
                                                projectid='project',
                                                timeout=2,
                                                auth_url="http://www.blah.com")
        self.assertEqual(instance.timeout, 2)
        mock_request = mock.Mock()
        mock_request.return_value = requests.Response()
        mock_request.return_value.status_code = 200
        mock_request.return_value.headers = {
            'x-server-management-url': 'blah.com',
            'x-auth-token': 'blah',
        }
        with mock.patch('requests.Session.request', mock_request):
            instance.authenticate()
            requests.Session.request.assert_called_with(mock.ANY, mock.ANY,
                                                        timeout=2,
                                                        headers=mock.ANY,
                                                        verify=mock.ANY)

    def test_client_reauth(self):
        # A 401 on a request must trigger a re-authentication POST to the
        # token endpoint with the original credentials.
        instance = novaclient.client.HTTPClient(user='user',
                                                password='password',
                                                projectid='project',
                                                timeout=2,
                                                auth_url="http://www.blah.com")
        instance.auth_token = 'foobar'
        instance.management_url = 'http://example.com'
        instance.version = 'v2.0'
        mock_request = mock.Mock()
        mock_request.side_effect = novaclient.exceptions.Unauthorized(401)
        with mock.patch('requests.Session.request', mock_request):
            try:
                instance.get('/servers/detail')
            except Exception:
                # The re-auth attempt also hits the mocked request and fails;
                # only the recorded call sequence matters here.
                pass
            get_headers = {'X-Auth-Project-Id': 'project',
                           'X-Auth-Token': 'foobar',
                           'User-Agent': 'python-novaclient',
                           'Accept': 'application/json'}
            reauth_headers = {'Content-Type': 'application/json',
                              'Accept': 'application/json',
                              'User-Agent': 'python-novaclient'}
            data = {
                "auth": {
                    "tenantName": "project",
                    "passwordCredentials": {
                        "username": "user",
                        "password": "password"
                    }
                }
            }
            # Expected sequence: failed GET, then token re-acquisition POST.
            expected = [mock.call('GET',
                                  'http://example.com/servers/detail',
                                  timeout=mock.ANY,
                                  headers=get_headers,
                                  verify=mock.ANY),
                        mock.call('POST', 'http://www.blah.com/tokens',
                                  timeout=mock.ANY,
                                  headers=reauth_headers,
                                  allow_redirects=mock.ANY,
                                  data=json.dumps(data),
                                  verify=mock.ANY)]
            self.assertEqual(mock_request.call_args_list, expected)

    def test_get_client_class_v3(self):
        output = novaclient.client.get_client_class('3')
        self.assertEqual(output, novaclient.v3.client.Client)

    def test_get_client_class_v2(self):
        output = novaclient.client.get_client_class('2')
        self.assertEqual(output, novaclient.v1_1.client.Client)

    def test_get_client_class_v2_int(self):
        # Integer versions are accepted as well as strings.
        output = novaclient.client.get_client_class(2)
        self.assertEqual(output, novaclient.v1_1.client.Client)

    def test_get_client_class_v1_1(self):
        output = novaclient.client.get_client_class('1.1')
        self.assertEqual(output, novaclient.v1_1.client.Client)

    def test_get_client_class_unknown(self):
        self.assertRaises(novaclient.exceptions.UnsupportedVersion,
                          novaclient.client.get_client_class, '0')

    # The os_cache flag must propagate to the inner HTTP client, and the
    # legacy no_cache flag is its inverse.
    def test_client_with_os_cache_enabled(self):
        cs = novaclient.v1_1.client.Client("user", "password", "project_id",
                                           auth_url="foo/v2", os_cache=True)
        self.assertEqual(True, cs.os_cache)
        self.assertEqual(True, cs.client.os_cache)

    def test_client_with_os_cache_disabled(self):
        cs = novaclient.v1_1.client.Client("user", "password", "project_id",
                                           auth_url="foo/v2", os_cache=False)
        self.assertEqual(False, cs.os_cache)
        self.assertEqual(False, cs.client.os_cache)

    def test_client_with_no_cache_enabled(self):
        cs = novaclient.v1_1.client.Client("user", "password", "project_id",
                                           auth_url="foo/v2", no_cache=True)
        self.assertEqual(False, cs.os_cache)
        self.assertEqual(False, cs.client.os_cache)

    def test_client_with_no_cache_disabled(self):
        cs = novaclient.v1_1.client.Client("user", "password", "project_id",
                                           auth_url="foo/v2", no_cache=False)
        self.assertEqual(True, cs.os_cache)
        self.assertEqual(True, cs.client.os_cache)

    def test_client_set_management_url_v1_1(self):
        cs = novaclient.v1_1.client.Client("user", "password", "project_id",
                                           auth_url="foo/v2")
        cs.set_management_url("blabla")
        self.assertEqual("blabla", cs.client.management_url)

    def test_client_get_reset_timings_v1_1(self):
        cs = novaclient.v1_1.client.Client("user", "password", "project_id",
                                           auth_url="foo/v2")
        self.assertEqual(0, len(cs.get_timings()))
        cs.client.times.append("somevalue")
        self.assertEqual(1, len(cs.get_timings()))
        self.assertEqual("somevalue", cs.get_timings()[0])
        cs.reset_timings()
        self.assertEqual(0, len(cs.get_timings()))

    def test_client_set_management_url_v3(self):
        cs = novaclient.v3.client.Client("user", "password", "project_id",
                                         auth_url="foo/v2")
        cs.set_management_url("blabla")
        self.assertEqual("blabla", cs.client.management_url)

    def test_client_get_reset_timings_v3(self):
        cs = novaclient.v3.client.Client("user", "password", "project_id",
                                         auth_url="foo/v2")
        self.assertEqual(0, len(cs.get_timings()))
        cs.client.times.append("somevalue")
        self.assertEqual(["somevalue"], cs.get_timings())
        cs.reset_timings()
        self.assertEqual(0, len(cs.get_timings()))

    # NOTE(review): method name has a typo ("clent"); renaming to
    # test_client_extensions_v3 would be safe since test methods are only
    # discovered by the runner, never called by other code.
    def test_clent_extensions_v3(self):
        # Only extensions whose module provides a manager class (here:
        # fakes.FakeManager) become attributes on the client.
        fake_attribute_name1 = "FakeAttribute1"
        fake_attribute_name2 = "FakeAttribute2"
        extensions = [
            novaclient.extension.Extension(fake_attribute_name1,
                                           fakes),
            novaclient.extension.Extension(fake_attribute_name2,
                                           utils),
        ]
        cs = novaclient.v3.client.Client("user", "password", "project_id",
                                         auth_url="foo/v2",
                                         extensions=extensions)
        self.assertTrue(isinstance(getattr(cs, fake_attribute_name1, None),
                                   fakes.FakeManager))
        self.assertFalse(hasattr(cs, fake_attribute_name2))

    @mock.patch.object(novaclient.client.HTTPClient, 'authenticate')
    def test_authenticate_call_v3(self, mock_authenticate):
        cs = novaclient.v3.client.Client("user", "password", "project_id",
                                         auth_url="foo/v2")
        cs.authenticate()
        self.assertTrue(mock_authenticate.called)
| apache-2.0 |
fdupoux/ansible-modules-core | cloud/amazon/ec2_tag.py | 28 | 5604 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_tag
short_description: create and remove tag(s) to ec2 resources.
description:
- Creates, removes and lists tags from any EC2 resource. The resource is referenced by its resource id (e.g. an instance being i-XXXXXXX). It is designed to be used with complex args (tags), see the examples. This module has a dependency on python-boto.
version_added: "1.3"
options:
resource:
description:
- The EC2 resource id.
required: true
default: null
aliases: []
state:
description:
- Whether the tags should be present or absent on the resource. Use list to interrogate the tags of an instance.
required: false
default: present
choices: ['present', 'absent', 'list']
aliases: []
tags:
description:
- a hash/dictionary of tags to add to the resource; '{"key":"value"}' and '{"key":"value","key":"value"}'
required: true
default: null
aliases: []
author: "Lester Wade (@lwade)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Basic example of adding tag(s)
tasks:
- name: tag a resource
ec2_tag:
region: eu-west-1
resource: vol-XXXXXX
state: present
tags:
Name: ubervol
env: prod
# Playbook example of adding tags to volumes on an instance
tasks:
- name: launch an instance
ec2:
count_tags:
Name: dbserver
Env: production
exact_count: 1
group: "{{ security_group }}"
keypair: "{{ keypair }}"
image: "{{ image_id }}"
instance_tags:
Name: dbserver
Env: production
instance_type: "{{ instance_type }}"
region: eu-west-1
volumes:
- device_name: /dev/xvdb
device_type: standard
volume_size: 10
delete_on_termination: true
wait: true
register: ec2
- name: list the volumes for the instance
ec2_vol:
instance: "{{ item.id }}"
region: eu-west-1
state: list
with_items: ec2.tagged_instances
register: ec2_vol
- name: tag the volumes
ec2_tag:
region: eu-west-1
resource: "{{ item.id }}"
state: present
tags:
Name: dbserver
Env: production
with_subelements:
- ec2_vol.results
- volumes
'''
import sys
try:
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def main():
    """Ansible module entry point: add, remove or list EC2 resource tags.

    Exits through module.exit_json/fail_json (which terminate the process),
    so at most one state branch runs to completion.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
            resource = dict(required=True),
            tags = dict(),
            state = dict(default='present', choices=['present', 'absent', 'list']),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    resource = module.params.get('resource')
    tags = module.params.get('tags')
    state = module.params.get('state')

    ec2 = ec2_connect(module)

    # We need a comparison here so that we can accurately report back changed status.
    # Need to expand the gettags return format and compare with "tags" and then tag or detag as appropriate.
    filters = {'resource-id' : resource}
    gettags = ec2.get_all_tags(filters=filters)

    dictadd = {}
    dictremove = {}
    baddict = {}
    tagdict = {}
    # Flatten boto Tag objects into a plain {name: value} dict.
    for tag in gettags:
        tagdict[tag.name] = tag.value

    if state == 'present':
        if not tags:
            module.fail_json(msg="tags argument is required when state is present")
        if set(tags.items()).issubset(set(tagdict.items())):
            # Every requested (key, value) pair already exists: no change.
            module.exit_json(msg="Tags already exists in %s." %resource, changed=False)
        else:
            # Collect only the pairs that are missing or have a new value.
            for (key, value) in set(tags.items()):
                if (key, value) not in set(tagdict.items()):
                    dictadd[key] = value
        # NOTE(review): `tagger` and the refreshed `gettags` below are never
        # used; the calls are kept for their side effects only.
        tagger = ec2.create_tags(resource, dictadd)
        gettags = ec2.get_all_tags(filters=filters)
        module.exit_json(msg="Tags %s created for resource %s." % (dictadd,resource), changed=True)

    if state == 'absent':
        if not tags:
            module.fail_json(msg="tags argument is required when state is absent")
        # First pass: if NONE of the requested pairs exist, nothing to do.
        for (key, value) in set(tags.items()):
            if (key, value) not in set(tagdict.items()):
                baddict[key] = value
                if set(baddict) == set(tags):
                    module.exit_json(msg="Nothing to remove here. Move along.", changed=False)
        # Second pass: remove only the pairs that are actually present.
        for (key, value) in set(tags.items()):
            if (key, value) in set(tagdict.items()):
                dictremove[key] = value
        tagger = ec2.delete_tags(resource, dictremove)
        gettags = ec2.get_all_tags(filters=filters)
        module.exit_json(msg="Tags %s removed for resource %s." % (dictremove,resource), changed=True)

    if state == 'list':
        module.exit_json(changed=False, tags=tagdict)
    # NOTE(review): unreachable in practice — every branch above exits via
    # exit_json/fail_json; AnsibleModule also rejects other state values.
    sys.exit(0)
# import module snippets
# Ansible modules historically star-import their utility helpers at the
# bottom of the file: AnsibleModule, ec2_argument_spec and ec2_connect used
# in main() come from these imports.
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

main()
home-assistant/home-assistant | homeassistant/components/ipp/__init__.py | 2 | 4560 | """The Internet Printing Protocol (IPP) integration."""
from __future__ import annotations
from datetime import timedelta
import logging
from pyipp import IPP, IPPError, Printer as IPPPrinter
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_NAME,
CONF_HOST,
CONF_PORT,
CONF_SSL,
CONF_VERIFY_SSL,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import (
ATTR_IDENTIFIERS,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_SOFTWARE_VERSION,
CONF_BASE_PATH,
DOMAIN,
)
# Platforms this integration forwards config entries to.
PLATFORMS = [SENSOR_DOMAIN]

# Poll the printer once per minute.
SCAN_INTERVAL = timedelta(seconds=60)

_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up IPP from a config entry.

    Reuses an existing coordinator from hass.data when the entry was set up
    before; otherwise creates one, performs the first refresh (raising
    ConfigEntryNotReady on failure), and forwards the entry to PLATFORMS.
    """
    hass.data.setdefault(DOMAIN, {})

    coordinator = hass.data[DOMAIN].get(entry.entry_id)
    if not coordinator:
        # Create IPP instance for this entry
        coordinator = IPPDataUpdateCoordinator(
            hass,
            host=entry.data[CONF_HOST],
            port=entry.data[CONF_PORT],
            base_path=entry.data[CONF_BASE_PATH],
            tls=entry.data[CONF_SSL],
            verify_ssl=entry.data[CONF_VERIFY_SSL],
        )
        hass.data[DOMAIN][entry.entry_id] = coordinator

    await coordinator.async_config_entry_first_refresh()

    hass.config_entries.async_setup_platforms(entry, PLATFORMS)

    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry and, on success, drop its stored coordinator."""
    unloaded = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
    if unloaded:
        # Only discard the coordinator once every platform unloaded cleanly.
        hass.data[DOMAIN].pop(entry.entry_id)
    return unloaded
class IPPDataUpdateCoordinator(DataUpdateCoordinator[IPPPrinter]):
    """Coordinator that polls a single IPP endpoint for printer state."""

    def __init__(
        self,
        hass: HomeAssistant,
        *,
        host: str,
        port: int,
        base_path: str,
        tls: bool,
        verify_ssl: bool,
    ) -> None:
        """Initialize global IPP data updater."""
        super().__init__(
            hass,
            _LOGGER,
            name=DOMAIN,
            update_interval=SCAN_INTERVAL,
        )
        # One IPP client per coordinator, backed by Home Assistant's shared
        # aiohttp session (selected according to verify_ssl).
        self.ipp = IPP(
            host=host,
            port=port,
            base_path=base_path,
            tls=tls,
            verify_ssl=verify_ssl,
            session=async_get_clientsession(hass, verify_ssl),
        )

    async def _async_update_data(self) -> IPPPrinter:
        """Fetch the current printer state from the IPP endpoint."""
        try:
            printer = await self.ipp.printer()
        except IPPError as error:
            raise UpdateFailed(f"Invalid response from API: {error}") from error
        return printer
class IPPEntity(CoordinatorEntity):
    """Base class for entities backed by an IPPDataUpdateCoordinator."""

    def __init__(
        self,
        *,
        entry_id: str,
        device_id: str,
        coordinator: IPPDataUpdateCoordinator,
        name: str,
        icon: str,
        enabled_default: bool = True,
    ) -> None:
        """Initialize the IPP entity."""
        super().__init__(coordinator)
        self._entry_id = entry_id
        self._device_id = device_id
        self._name = name
        self._icon = icon
        self._enabled_default = enabled_default

    @property
    def name(self) -> str:
        """Return the name of the entity."""
        return self._name

    @property
    def icon(self) -> str:
        """Return the mdi icon of the entity."""
        return self._icon

    @property
    def entity_registry_enabled_default(self) -> bool:
        """Return whether the entity is enabled when first registered."""
        return self._enabled_default

    @property
    def device_info(self) -> DeviceInfo:
        """Return device information about this IPP device."""
        if self._device_id is None:
            return None
        printer_info = self.coordinator.data.info
        return {
            ATTR_IDENTIFIERS: {(DOMAIN, self._device_id)},
            ATTR_NAME: printer_info.name,
            ATTR_MANUFACTURER: printer_info.manufacturer,
            ATTR_MODEL: printer_info.model,
            ATTR_SOFTWARE_VERSION: printer_info.version,
        }
| apache-2.0 |
rooshilp/CMPUT410W15-project | testenv/lib/python2.7/site-packages/django/contrib/gis/db/models/fields.py | 33 | 12549 | from django.db.models.fields import Field
from django.db.models.sql.expressions import SQLEvaluator
from django.utils.translation import ugettext_lazy as _
from django.contrib.gis import forms
from django.contrib.gis.db.models.constants import GIS_LOOKUPS
from django.contrib.gis.db.models.lookups import GISLookup
from django.contrib.gis.db.models.proxy import GeometryProxy
from django.contrib.gis.geometry.backend import Geometry, GeometryException
from django.utils import six
# Local cache of the spatial_ref_sys table, which holds SRID data for each
# spatial database alias. This cache exists so that the database isn't queried
# for SRID info each time a distance query is constructed.
_srid_cache = {}


def get_srid_info(srid, connection):
    """
    Return the units, unit name, and spheroid WKT for the given SRID.

    The data comes from the `spatial_ref_sys` (or equivalent) table of
    the given database connection and is cached per database alias, so
    each SRID is looked up at most once per alias.
    """
    try:
        # The SpatialRefSys model for the spatial backend.
        SpatialRefSys = connection.ops.spatial_ref_sys()
    except NotImplementedError:
        # No `spatial_ref_sys` table in spatial backend (e.g., MySQL).
        return None, None, None

    # Per-alias sub-cache; created on first use for this database.
    alias_cache = _srid_cache.setdefault(connection.alias, {})
    if srid not in alias_cache:
        # Use `SpatialRefSys` model to query for spatial reference info.
        sr = SpatialRefSys.objects.using(connection.alias).get(srid=srid)
        units, units_name = sr.units
        spheroid = SpatialRefSys.get_spheroid(sr.wkt)
        alias_cache[srid] = (units, units_name, spheroid)
    return alias_cache[srid]
class GeometryField(Field):
    "The base GIS field -- maps to the OpenGIS Specification Geometry type."
    # The OpenGIS Geometry name.
    geom_type = 'GEOMETRY'
    form_class = forms.GeometryField
    # Geodetic units.
    geodetic_units = ('decimal degree', 'degree')
    description = _("The base GIS field -- maps to the OpenGIS Specification Geometry type.")

    def __init__(self, verbose_name=None, srid=4326, spatial_index=True, dim=2,
                 geography=False, **kwargs):
        """
        The initialization function for geometry fields. Takes the following
        as keyword arguments:
        srid:
         The spatial reference system identifier, an OGC standard.
         Defaults to 4326 (WGS84).
        spatial_index:
         Indicates whether to create a spatial index.  Defaults to True.
         Set this instead of 'db_index' for geographic fields since index
         creation is different for geometry columns.
        dim:
         The number of dimensions for this geometry.  Defaults to 2.
        extent:
         Customize the extent, in a 4-tuple of WGS 84 coordinates, for the
         geometry field entry in the `USER_SDO_GEOM_METADATA` table.  Defaults
         to (-180.0, -90.0, 180.0, 90.0).
        tolerance:
         Define the tolerance, in meters, to use for the geometry field
         entry in the `USER_SDO_GEOM_METADATA` table.  Defaults to 0.05.
        """
        # Setting the index flag with the value of the `spatial_index` keyword.
        self.spatial_index = spatial_index
        # Setting the SRID and getting the units.  Unit information must be
        # easily available in the field instance for distance queries.
        self.srid = srid
        # Setting the dimension of the geometry field.
        self.dim = dim
        # Setting the verbose_name keyword argument with the positional
        # first parameter, so this works like normal fields.
        kwargs['verbose_name'] = verbose_name
        # Is this a geography rather than a geometry column?
        self.geography = geography
        # Oracle-specific private attributes for creating the entry in
        # `USER_SDO_GEOM_METADATA`
        self._extent = kwargs.pop('extent', (-180.0, -90.0, 180.0, 90.0))
        self._tolerance = kwargs.pop('tolerance', 0.05)
        super(GeometryField, self).__init__(**kwargs)

    def deconstruct(self):
        """Return the (name, path, args, kwargs) 4-tuple used by migrations."""
        name, path, args, kwargs = super(GeometryField, self).deconstruct()
        # Always include SRID for less fragility; include others if they're
        # not the default values.
        kwargs['srid'] = self.srid
        if self.dim != 2:
            kwargs['dim'] = self.dim
        if self.spatial_index is not True:
            kwargs['spatial_index'] = self.spatial_index
        if self.geography is not False:
            kwargs['geography'] = self.geography
        return name, path, args, kwargs

    # The following functions are used to get the units, their name, and
    # the spheroid corresponding to the SRID of the GeometryField.
    def _get_srid_info(self, connection):
        # Get attributes from `get_srid_info`; cached on the field instance.
        self._units, self._units_name, self._spheroid = get_srid_info(self.srid, connection)

    def spheroid(self, connection):
        """Return the spheroid WKT for this field's SRID (lazily fetched)."""
        if not hasattr(self, '_spheroid'):
            self._get_srid_info(connection)
        return self._spheroid

    def units(self, connection):
        """Return the units for this field's SRID (lazily fetched)."""
        if not hasattr(self, '_units'):
            self._get_srid_info(connection)
        return self._units

    def units_name(self, connection):
        """Return the unit name for this field's SRID (lazily fetched)."""
        if not hasattr(self, '_units_name'):
            self._get_srid_info(connection)
        return self._units_name

    ### Routines specific to GeometryField ###
    def geodetic(self, connection):
        """
        Returns true if this field's SRID corresponds with a coordinate
        system that uses non-projected units (e.g., latitude/longitude).
        """
        return self.units_name(connection).lower() in self.geodetic_units

    def get_distance(self, value, lookup_type, connection):
        """
        Returns a distance number in units of the field.  For example, if
        `D(km=1)` was passed in and the units of the field were in meters,
        then 1000 would be returned.
        """
        return connection.ops.get_distance(self, value, lookup_type)

    def get_prep_value(self, value):
        """
        Spatial lookup values are either a parameter that is (or may be
        converted to) a geometry, or a sequence of lookup values that
        begins with a geometry.  This routine will setup the geometry
        value properly, and preserve any other lookup parameters before
        returning to the caller.
        """
        value = super(GeometryField, self).get_prep_value(value)
        if isinstance(value, SQLEvaluator):
            # Expression (e.g. F()) -- pass through untouched.
            return value
        elif isinstance(value, (tuple, list)):
            # Sequence lookup: first element is the geometry, the rest are
            # extra lookup parameters (e.g. a distance) preserved below.
            geom = value[0]
            seq_value = True
        else:
            geom = value
            seq_value = False
        # When the input is not a GEOS geometry, attempt to construct one
        # from the given string input.
        if isinstance(geom, Geometry):
            pass
        elif isinstance(geom, (bytes, six.string_types)) or hasattr(geom, '__geo_interface__'):
            try:
                geom = Geometry(geom)
            except GeometryException:
                raise ValueError('Could not create geometry from lookup value.')
        else:
            raise ValueError('Cannot use object with type %s for a geometry lookup parameter.' % type(geom).__name__)
        # Assigning the SRID value.
        geom.srid = self.get_srid(geom)
        if seq_value:
            lookup_val = [geom]
            lookup_val.extend(value[1:])
            return tuple(lookup_val)
        else:
            return geom

    def get_srid(self, geom):
        """
        Returns the default SRID for the given geometry, taking into account
        the SRID set for the field.  For example, if the input geometry
        has no SRID, then that of the field will be returned.
        """
        gsrid = geom.srid  # SRID of given geometry.
        if gsrid is None or self.srid == -1 or (gsrid == -1 and self.srid != -1):
            return self.srid
        else:
            return gsrid

    ### Routines overloaded from Field ###
    def contribute_to_class(self, cls, name):
        """Install a lazy GeometryProxy descriptor on the model class."""
        super(GeometryField, self).contribute_to_class(cls, name)
        # Setup for lazy-instantiated Geometry object.
        setattr(cls, self.attname, GeometryProxy(Geometry, self))

    def db_type(self, connection):
        """Return the backend-specific column type for this geometry field."""
        return connection.ops.geo_db_type(self)

    def formfield(self, **kwargs):
        """Return a form field, falling back to a Textarea widget for 3D data."""
        defaults = {'form_class': self.form_class,
                    'geom_type': self.geom_type,
                    'srid': self.srid,
                    }
        defaults.update(kwargs)
        if (self.dim > 2 and 'widget' not in kwargs and
                not getattr(defaults['form_class'].widget, 'supports_3d', False)):
            defaults['widget'] = forms.Textarea
        return super(GeometryField, self).formfield(**defaults)

    def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
        """
        Prepare for the database lookup, and return any spatial parameters
        necessary for the query.  This includes wrapping any geometry
        parameters with a backend-specific adapter and formatting any distance
        parameters into the correct units for the coordinate system of the
        field.
        """
        if lookup_type in connection.ops.gis_terms:
            # special case for isnull lookup
            if lookup_type == 'isnull':
                return []
            # Populating the parameters list, and wrapping the Geometry
            # with the Adapter of the spatial backend.
            if isinstance(value, (tuple, list)):
                params = [connection.ops.Adapter(value[0])]
                if lookup_type in connection.ops.distance_functions:
                    # Getting the distance parameter in the units of the field.
                    params += self.get_distance(value[1:], lookup_type, connection)
                elif lookup_type in connection.ops.truncate_params:
                    # Lookup is one where SQL parameters aren't needed from the
                    # given lookup value.
                    pass
                else:
                    params += value[1:]
            elif isinstance(value, SQLEvaluator):
                params = []
            else:
                params = [connection.ops.Adapter(value)]
            return params
        else:
            raise ValueError('%s is not a valid spatial lookup for %s.' %
                             (lookup_type, self.__class__.__name__))

    def get_prep_lookup(self, lookup_type, value):
        """Prepare the lookup value; `isnull` takes a bool, others a geometry."""
        if lookup_type == 'isnull':
            return bool(value)
        else:
            return self.get_prep_value(value)

    def get_db_prep_save(self, value, connection):
        "Prepares the value for saving in the database."
        if not value:
            return None
        else:
            return connection.ops.Adapter(self.get_prep_value(value))

    def get_placeholder(self, value, connection):
        """
        Returns the placeholder for the geometry column for the
        given value.
        """
        return connection.ops.get_geom_placeholder(self, value)
# Dynamically create a GISLookup subclass for each known spatial lookup
# name and register it on GeometryField (and thus on all its subclasses).
for lookup_name in GIS_LOOKUPS:
    lookup = type(lookup_name, (GISLookup,), {'lookup_name': lookup_name})
    GeometryField.register_lookup(lookup)
# The OpenGIS Geometry Type Fields
class PointField(GeometryField):
    """Field for OpenGIS POINT geometries."""
    geom_type = 'POINT'
    form_class = forms.PointField
    description = _("Point")
class LineStringField(GeometryField):
    """Field for OpenGIS LINESTRING geometries."""
    geom_type = 'LINESTRING'
    form_class = forms.LineStringField
    description = _("Line string")
class PolygonField(GeometryField):
    """Field for OpenGIS POLYGON geometries."""
    geom_type = 'POLYGON'
    form_class = forms.PolygonField
    description = _("Polygon")
class MultiPointField(GeometryField):
    """Field for OpenGIS MULTIPOINT geometries."""
    geom_type = 'MULTIPOINT'
    form_class = forms.MultiPointField
    description = _("Multi-point")
class MultiLineStringField(GeometryField):
    """Field for OpenGIS MULTILINESTRING geometries."""
    geom_type = 'MULTILINESTRING'
    form_class = forms.MultiLineStringField
    description = _("Multi-line string")
class MultiPolygonField(GeometryField):
    """Field for OpenGIS MULTIPOLYGON geometries."""
    geom_type = 'MULTIPOLYGON'
    form_class = forms.MultiPolygonField
    description = _("Multi polygon")
class GeometryCollectionField(GeometryField):
    """Field for OpenGIS GEOMETRYCOLLECTION geometries."""
    geom_type = 'GEOMETRYCOLLECTION'
    form_class = forms.GeometryCollectionField
    description = _("Geometry collection")
| gpl-2.0 |
jicruz/heroku-bot | lib/requests/models.py | 70 | 34003 | # -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import collections
import datetime
import sys
# Import encoding now, to avoid implicit import later.
# Implicit import within threads may cause LookupError when standard library is in a ZIP,
# such as in Embedded Python. See https://github.com/requests/requests/issues/3578.
import encodings.idna
from urllib3.fields import RequestField
from urllib3.filepost import encode_multipart_formdata
from urllib3.util import parse_url
from urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from io import UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from ._internal_utils import to_native_string, unicode_is_ascii
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, check_header_validity)
from .compat import (
cookielib, urlunparse, urlsplit, urlencode, str, bytes,
is_py2, chardet, builtin_str, basestring)
from .compat import json as complexjson
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
    codes.moved,               # 301
    codes.found,               # 302
    codes.other,               # 303
    codes.temporary_redirect,  # 307
    codes.permanent_redirect,  # 308
)

#: Maximum number of redirects followed by default.
DEFAULT_REDIRECT_LIMIT = 30
# Default chunk sizes, in bytes, for iterating response content
# (presumably consumed by Response.content / iter_lines elsewhere in
# this module -- the consumers are outside this view).
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
class RequestEncodingMixin(object):
    """Mixin providing URL-path and body-encoding helpers for request objects."""

    @property
    def path_url(self):
        """Build the path URL to use (path plus query string, no host)."""
        url = []
        p = urlsplit(self.url)
        path = p.path
        if not path:
            # A bare host has no path component; requests are sent to '/'.
            path = '/'
        url.append(path)
        query = p.query
        if query:
            url.append('?')
            url.append(query)
        return ''.join(url)

    @staticmethod
    def _encode_params(data):
        """Encode parameters in a piece of data.

        Will successfully encode parameters when passed as a dict or a list of
        2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
        if parameters are supplied as a dict.
        """
        if isinstance(data, (str, bytes)):
            # Already-encoded data passes through untouched.
            return data
        elif hasattr(data, 'read'):
            # File-like bodies are streamed as-is.
            return data
        elif hasattr(data, '__iter__'):
            result = []
            for k, vs in to_key_val_list(data):
                # A scalar value is treated as a one-element sequence so that
                # multi-valued keys (k=[v1, v2]) and scalars share one path.
                if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
                    vs = [vs]
                for v in vs:
                    if v is not None:
                        result.append(
                            (k.encode('utf-8') if isinstance(k, str) else k,
                             v.encode('utf-8') if isinstance(v, str) else v))
            return urlencode(result, doseq=True)
        else:
            return data

    @staticmethod
    def _encode_files(files, data):
        """Build the body for a multipart/form-data request.

        Will successfully encode files when passed as a dict or a list of
        tuples. Order is retained if data is a list of tuples but arbitrary
        if parameters are supplied as a dict.
        The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype)
        or 4-tuples (filename, fileobj, contentype, custom_headers).

        Returns a (body, content_type) pair. Raises ValueError when `files`
        is empty or `data` is a string.
        """
        if (not files):
            raise ValueError("Files must be provided.")
        elif isinstance(data, basestring):
            raise ValueError("Data must not be a string.")

        new_fields = []
        fields = to_key_val_list(data or {})
        files = to_key_val_list(files or {})

        # Regular form fields go first, each encoded as UTF-8.
        for field, val in fields:
            if isinstance(val, basestring) or not hasattr(val, '__iter__'):
                val = [val]
            for v in val:
                if v is not None:
                    # Don't call str() on bytestrings: in Py3 it all goes wrong.
                    if not isinstance(v, bytes):
                        v = str(v)
                    new_fields.append(
                        (field.decode('utf-8') if isinstance(field, bytes) else field,
                         v.encode('utf-8') if isinstance(v, str) else v))

        for (k, v) in files:
            # support for explicit filename
            ft = None
            fh = None
            if isinstance(v, (tuple, list)):
                if len(v) == 2:
                    fn, fp = v
                elif len(v) == 3:
                    fn, fp, ft = v
                else:
                    fn, fp, ft, fh = v
            else:
                # Bare file object: derive the filename, fall back to the key.
                fn = guess_filename(v) or k
                fp = v
            if isinstance(fp, (str, bytes, bytearray)):
                fdata = fp
            else:
                fdata = fp.read()
            rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
            rf.make_multipart(content_type=ft)
            new_fields.append(rf)

        body, content_type = encode_multipart_formdata(new_fields)
        return body, content_type
class RequestHooksMixin(object):
    """Mixin providing hook registration/deregistration via ``self.hooks``.

    ``self.hooks`` is expected to be a dict mapping event names to lists
    of callables (set up by the consuming class).
    """

    def register_hook(self, event, hook):
        """Properly register a hook.

        :param event: name of an event already present in ``self.hooks``.
        :param hook: a single callable, or an iterable of callables
            (non-callable members of an iterable are silently skipped).
        :raises ValueError: if ``event`` is not a known hook event.
        """
        # BUGFIX: `collections.Callable` was deprecated since Python 3.3 and
        # removed in Python 3.10; the ABC lives in `collections.abc`.  Fall
        # back to the old location so Python 2 keeps working.
        try:
            from collections.abc import Callable
        except ImportError:  # Python 2
            from collections import Callable

        if event not in self.hooks:
            raise ValueError('Unsupported event specified, with event name "%s"' % (event))

        if isinstance(hook, Callable):
            self.hooks[event].append(hook)
        elif hasattr(hook, '__iter__'):
            self.hooks[event].extend(h for h in hook if isinstance(h, Callable))

    def deregister_hook(self, event, hook):
        """Deregister a previously registered hook.

        Returns True if the hook existed, False if not.
        """
        try:
            self.hooks[event].remove(hook)
            return True
        except ValueError:
            return False
class Request(RequestHooksMixin):
    """A user-created :class:`Request <Request>` object.

    Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.

    :param method: HTTP method to use.
    :param url: URL to send.
    :param headers: dictionary of headers to send.
    :param files: dictionary of {filename: fileobject} files to multipart upload.
    :param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place.
    :param json: json for the body to attach to the request (if files or data is not specified).
    :param params: dictionary of URL parameters to append to the URL.
    :param auth: Auth handler or (user, pass) tuple.
    :param cookies: dictionary or CookieJar of cookies to attach to this request.
    :param hooks: dictionary of callback hooks, for internal usage.

    Usage::

      >>> import requests
      >>> req = requests.Request('GET', 'http://httpbin.org/get')
      >>> req.prepare()
      <PreparedRequest [GET]>
    """

    def __init__(self,
                 method=None, url=None, headers=None, files=None, data=None,
                 params=None, auth=None, cookies=None, hooks=None, json=None):
        # Replace None with fresh empty containers; mutable defaults must
        # never live in the signature itself.
        if data is None:
            data = []
        if files is None:
            files = []
        if headers is None:
            headers = {}
        if params is None:
            params = {}
        if hooks is None:
            hooks = {}

        # Start from the default hook table, then fold in caller-supplied hooks.
        self.hooks = default_hooks()
        for event, hook in list(hooks.items()):
            self.register_hook(event=event, hook=hook)

        self.method = method
        self.url = url
        self.headers = headers
        self.files = files
        self.data = data
        self.json = json
        self.params = params
        self.auth = auth
        self.cookies = cookies

    def __repr__(self):
        return '<Request [%s]>' % (self.method)

    def prepare(self):
        """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
        prepared = PreparedRequest()
        prepared.prepare(
            method=self.method,
            url=self.url,
            headers=self.headers,
            files=self.files,
            data=self.data,
            json=self.json,
            params=self.params,
            auth=self.auth,
            cookies=self.cookies,
            hooks=self.hooks,
        )
        return prepared
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
    """The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
    containing the exact bytes that will be sent to the server.

    Generated from either a :class:`Request <Request>` object or manually.

    Usage::

      >>> import requests
      >>> req = requests.Request('GET', 'http://httpbin.org/get')
      >>> r = req.prepare()
      <PreparedRequest [GET]>
      >>> s = requests.Session()
      >>> s.send(r)
      <Response [200]>
    """

    def __init__(self):
        #: HTTP verb to send to the server.
        self.method = None
        #: HTTP URL to send the request to.
        self.url = None
        #: dictionary of HTTP headers.
        self.headers = None
        # The `CookieJar` used to create the Cookie header will be stored here
        # after prepare_cookies is called
        self._cookies = None
        #: request body to send to the server.
        self.body = None
        #: dictionary of callback hooks, for internal usage.
        self.hooks = default_hooks()
        #: integer denoting starting position of a readable file-like body.
        self._body_position = None

    def prepare(self,
                method=None, url=None, headers=None, files=None, data=None,
                params=None, auth=None, cookies=None, hooks=None, json=None):
        """Prepares the entire request with the given parameters.

        The individual prepare_* steps run in a fixed order; see the
        inline notes below for why auth and hooks come last.
        """
        self.prepare_method(method)
        self.prepare_url(url, params)
        self.prepare_headers(headers)
        self.prepare_cookies(cookies)
        self.prepare_body(data, files, json)
        self.prepare_auth(auth, url)

        # Note that prepare_auth must be last to enable authentication schemes
        # such as OAuth to work on a fully prepared request.

        # This MUST go after prepare_auth. Authenticators could add a hook
        self.prepare_hooks(hooks)

    def __repr__(self):
        return '<PreparedRequest [%s]>' % (self.method)

    def copy(self):
        """Return a shallow copy of this prepared request (cookies are copied)."""
        p = PreparedRequest()
        p.method = self.method
        p.url = self.url
        p.headers = self.headers.copy() if self.headers is not None else None
        p._cookies = _copy_cookie_jar(self._cookies)
        p.body = self.body
        p.hooks = self.hooks
        p._body_position = self._body_position
        return p

    def prepare_method(self, method):
        """Prepares the given HTTP method (uppercased, native string)."""
        self.method = method
        if self.method is not None:
            self.method = to_native_string(self.method.upper())

    @staticmethod
    def _get_idna_encoded_host(host):
        # IDNA-encode a non-ASCII hostname; any IDNA failure is normalized
        # to UnicodeError so prepare_url only has one error type to handle.
        import idna

        try:
            host = idna.encode(host, uts46=True).decode('utf-8')
        except idna.IDNAError:
            raise UnicodeError
        return host

    def prepare_url(self, url, params):
        """Prepares the given HTTP URL (scheme/host validation, IDNA, params)."""
        #: Accept objects that have string representations.
        #: We're unable to blindly call unicode/str functions
        #: as this will include the bytestring indicator (b'')
        #: on python 3.x.
        #: https://github.com/requests/requests/pull/2238
        if isinstance(url, bytes):
            url = url.decode('utf8')
        else:
            # `unicode` only exists on Python 2; the is_py2 guard keeps this
            # branch from evaluating it on Python 3.
            url = unicode(url) if is_py2 else str(url)

        # Remove leading whitespaces from url
        url = url.lstrip()

        # Don't do any URL preparation for non-HTTP schemes like `mailto`,
        # `data` etc to work around exceptions from `url_parse`, which
        # handles RFC 3986 only.
        if ':' in url and not url.lower().startswith('http'):
            self.url = url
            return

        # Support for unicode domain names and paths.
        try:
            scheme, auth, host, port, path, query, fragment = parse_url(url)
        except LocationParseError as e:
            raise InvalidURL(*e.args)

        if not scheme:
            error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?")
            error = error.format(to_native_string(url, 'utf8'))
            raise MissingSchema(error)

        if not host:
            raise InvalidURL("Invalid URL %r: No host supplied" % url)

        # In general, we want to try IDNA encoding the hostname if the string contains
        # non-ASCII characters. This allows users to automatically get the correct IDNA
        # behaviour. For strings containing only ASCII characters, we need to also verify
        # it doesn't start with a wildcard (*), before allowing the unencoded hostname.
        if not unicode_is_ascii(host):
            try:
                host = self._get_idna_encoded_host(host)
            except UnicodeError:
                raise InvalidURL('URL has an invalid label.')
        elif host.startswith(u'*'):
            raise InvalidURL('URL has an invalid label.')

        # Carefully reconstruct the network location
        netloc = auth or ''
        if netloc:
            netloc += '@'
        netloc += host
        if port:
            netloc += ':' + str(port)

        # Bare domains aren't valid URLs.
        if not path:
            path = '/'

        if is_py2:
            # On Python 2, every component must be a bytestring before
            # urlunparse/requote_uri.
            if isinstance(scheme, str):
                scheme = scheme.encode('utf-8')
            if isinstance(netloc, str):
                netloc = netloc.encode('utf-8')
            if isinstance(path, str):
                path = path.encode('utf-8')
            if isinstance(query, str):
                query = query.encode('utf-8')
            if isinstance(fragment, str):
                fragment = fragment.encode('utf-8')

        if isinstance(params, (str, bytes)):
            params = to_native_string(params)

        enc_params = self._encode_params(params)
        if enc_params:
            # Append encoded params to any query string already in the URL.
            if query:
                query = '%s&%s' % (query, enc_params)
            else:
                query = enc_params

        url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
        self.url = url

    def prepare_headers(self, headers):
        """Prepares the given HTTP headers (validated, case-insensitive)."""
        self.headers = CaseInsensitiveDict()
        if headers:
            for header in headers.items():
                # Raise exception on invalid header value.
                check_header_validity(header)
                name, value = header
                self.headers[to_native_string(name)] = value

    def prepare_body(self, data, files, json=None):
        """Prepares the given HTTP body data.

        Precedence: an explicit `data` body wins over `json`; `files`
        triggers multipart encoding; a non-sequence iterable is streamed
        with chunked transfer encoding.
        """
        # Check if file, fo, generator, iterator.
        # If not, run through normal process.

        # Nottin' on you.
        body = None
        content_type = None

        if not data and json is not None:
            # urllib3 requires a bytes-like body. Python 2's json.dumps
            # provides this natively, but Python 3 gives a Unicode string.
            content_type = 'application/json'
            body = complexjson.dumps(json)
            if not isinstance(body, bytes):
                body = body.encode('utf-8')

        # A "stream" is any iterable that is not a string, sequence, or mapping.
        is_stream = all([
            hasattr(data, '__iter__'),
            not isinstance(data, (basestring, list, tuple, collections.Mapping))
        ])

        try:
            length = super_len(data)
        except (TypeError, AttributeError, UnsupportedOperation):
            length = None

        if is_stream:
            body = data

            if getattr(body, 'tell', None) is not None:
                # Record the current file position before reading.
                # This will allow us to rewind a file in the event
                # of a redirect.
                try:
                    self._body_position = body.tell()
                except (IOError, OSError):
                    # This differentiates from None, allowing us to catch
                    # a failed `tell()` later when trying to rewind the body
                    self._body_position = object()

            if files:
                raise NotImplementedError('Streamed bodies and files are mutually exclusive.')

            if length:
                self.headers['Content-Length'] = builtin_str(length)
            else:
                self.headers['Transfer-Encoding'] = 'chunked'
        else:
            # Multi-part file uploads.
            if files:
                (body, content_type) = self._encode_files(files, data)
            else:
                if data:
                    body = self._encode_params(data)
                    if isinstance(data, basestring) or hasattr(data, 'read'):
                        content_type = None
                    else:
                        content_type = 'application/x-www-form-urlencoded'

            self.prepare_content_length(body)

            # Add content-type if it wasn't explicitly provided.
            if content_type and ('content-type' not in self.headers):
                self.headers['Content-Type'] = content_type

        self.body = body

    def prepare_content_length(self, body):
        """Prepare Content-Length header based on request method and body"""
        if body is not None:
            length = super_len(body)
            if length:
                # If length exists, set it. Otherwise, we fallback
                # to Transfer-Encoding: chunked.
                self.headers['Content-Length'] = builtin_str(length)
        elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None:
            # Set Content-Length to 0 for methods that can have a body
            # but don't provide one. (i.e. not GET or HEAD)
            self.headers['Content-Length'] = '0'

    def prepare_auth(self, auth, url=''):
        """Prepares the given HTTP auth data."""
        # If no Auth is explicitly provided, extract it from the URL first.
        if auth is None:
            url_auth = get_auth_from_url(self.url)
            auth = url_auth if any(url_auth) else None

        if auth:
            if isinstance(auth, tuple) and len(auth) == 2:
                # special-case basic HTTP auth
                auth = HTTPBasicAuth(*auth)

            # Allow auth to make its changes.
            r = auth(self)

            # Update self to reflect the auth changes.
            self.__dict__.update(r.__dict__)

            # Recompute Content-Length
            self.prepare_content_length(self.body)

    def prepare_cookies(self, cookies):
        """Prepares the given HTTP cookie data.

        This function eventually generates a ``Cookie`` header from the
        given cookies using cookielib. Due to cookielib's design, the header
        will not be regenerated if it already exists, meaning this function
        can only be called once for the life of the
        :class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
        to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
        header is removed beforehand.
        """
        if isinstance(cookies, cookielib.CookieJar):
            self._cookies = cookies
        else:
            self._cookies = cookiejar_from_dict(cookies)

        cookie_header = get_cookie_header(self._cookies, self)
        if cookie_header is not None:
            self.headers['Cookie'] = cookie_header

    def prepare_hooks(self, hooks):
        """Prepares the given hooks."""
        # hooks can be passed as None to the prepare method and to this
        # method. To prevent iterating over None, simply use an empty list
        # if hooks is False-y
        hooks = hooks or []
        for event in hooks:
            self.register_hook(event, hooks[event])
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content', 'status_code', 'headers', 'url', 'history',
'encoding', 'reason', 'cookies', 'elapsed', 'request'
]
def __init__(self):
self._content = False
self._content_consumed = False
self._next = None
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
# This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta).
#: This property specifically measures the time taken between sending
#: the first byte of the request and finishing parsing the headers. It
#: is therefore unaffected by consuming the response content or the
#: value of the ``stream`` keyword argument.
self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
#: is a response.
self.request = None
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return dict(
(attr, getattr(self, attr, None))
for attr in self.__attrs__
)
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
setattr(self, 'raw', None)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code, is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __nonzero__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code, is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code, is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
try:
self.raise_for_status()
except HTTPError:
return False
return True
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanent versions of redirect."""
return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
    @property
    def next(self):
        """Returns a PreparedRequest for the next request in a redirect chain, if there is one."""
        # NOTE(review): _next is presumably populated during redirect
        # resolution elsewhere; None when there is no follow-up request.
        return self._next
    @property
    def apparent_encoding(self):
        """The apparent encoding, provided by the chardet library."""
        # Reads self.content, so this consumes and caches the full body.
        return chardet.detect(self.content)['encoding']
    def iter_content(self, chunk_size=1, decode_unicode=False):
        """Iterates over the response data. When stream=True is set on the
        request, this avoids reading the content at once into memory for
        large responses. The chunk size is the number of bytes it should
        read into memory. This is not necessarily the length of each item
        returned as decoding can take place.

        chunk_size must be of type int or None. A value of None will
        function differently depending on the value of `stream`.
        stream=True will read data as it arrives in whatever size the
        chunks are received. If stream=False, data is returned as
        a single chunk.

        If decode_unicode is True, content will be decoded using the best
        available encoding based on the response.
        """
        def generate():
            # Lazily pull chunks off the underlying stream.
            # Special case for urllib3.
            if hasattr(self.raw, 'stream'):
                try:
                    for chunk in self.raw.stream(chunk_size, decode_content=True):
                        yield chunk
                # Translate urllib3-level failures into requests exceptions.
                except ProtocolError as e:
                    raise ChunkedEncodingError(e)
                except DecodeError as e:
                    raise ContentDecodingError(e)
                except ReadTimeoutError as e:
                    raise ConnectionError(e)
            else:
                # Standard file-like object.
                while True:
                    chunk = self.raw.read(chunk_size)
                    if not chunk:
                        break
                    yield chunk
            self._content_consumed = True
        # A bool left in self._content after consumption means the stream
        # was drained without caching the body -- iterating again would
        # silently yield nothing, so fail loudly instead.
        if self._content_consumed and isinstance(self._content, bool):
            raise StreamConsumedError()
        elif chunk_size is not None and not isinstance(chunk_size, int):
            raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size))
        # simulate reading small chunks of the content
        reused_chunks = iter_slices(self._content, chunk_size)
        stream_chunks = generate()
        # Replay cached content if already consumed, otherwise stream live.
        chunks = reused_chunks if self._content_consumed else stream_chunks
        if decode_unicode:
            chunks = stream_decode_response_unicode(chunks, self)
        return chunks
    def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None, delimiter=None):
        """Iterates over the response data, one line at a time. When
        stream=True is set on the request, this avoids reading the
        content at once into memory for large responses.

        .. note:: This method is not reentrant safe.
        """
        # Holds a partial line left over at the end of the previous chunk.
        pending = None
        for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
            if pending is not None:
                # Re-attach the carried-over fragment to the new chunk.
                chunk = pending + chunk
            if delimiter:
                lines = chunk.split(delimiter)
            else:
                lines = chunk.splitlines()
            # Heuristic for "chunk ended mid-line": splitlines() strips line
            # terminators, so if the last split piece still ends with the
            # chunk's final character, no terminator was present and the
            # fragment must be carried into the next iteration.
            if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
                pending = lines.pop()
            else:
                pending = None
            for line in lines:
                yield line
        # Flush any trailing fragment once the stream is exhausted.
        if pending is not None:
            yield pending
    @property
    def content(self):
        """Content of the response, in bytes."""
        # self._content uses False as a "not yet read" sentinel; after this
        # property runs it holds either the cached bytes or None (no body
        # could be read).
        if self._content is False:
            # Read the contents.
            if self._content_consumed:
                # Stream already drained elsewhere without caching.
                raise RuntimeError(
                    'The content for this response was already consumed')
            if self.status_code == 0 or self.raw is None:
                self._content = None
            else:
                self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
        self._content_consumed = True
        # don't need to release the connection; that's been handled by urllib3
        # since we exhausted the data.
        return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
    def json(self, **kwargs):
        r"""Returns the json-encoded content of a response, if any.

        :param \*\*kwargs: Optional arguments that ``json.loads`` takes.
        :raises ValueError: If the response body does not contain valid json.
        """
        # The > 3 length check presumably ensures enough bytes for BOM/UTF
        # width detection -- confirm against guess_json_utf's contract.
        if not self.encoding and self.content and len(self.content) > 3:
            # No encoding set. JSON RFC 4627 section 3 states we should expect
            # UTF-8, -16 or -32. Detect which one to use; If the detection or
            # decoding fails, fall back to `self.text` (using chardet to make
            # a best guess).
            encoding = guess_json_utf(self.content)
            if encoding is not None:
                try:
                    return complexjson.loads(
                        self.content.decode(encoding), **kwargs
                    )
                except UnicodeDecodeError:
                    # Wrong UTF codec detected; usually because it's not UTF-8
                    # but some other 8-bit codec. This is an RFC violation,
                    # and the server didn't bother to tell us what codec *was*
                    # used.
                    pass
        return complexjson.loads(self.text, **kwargs)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
def raise_for_status(self):
"""Raises stored :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if isinstance(self.reason, bytes):
# We attempt to decode utf-8 first because some servers
# choose to localize their reason strings. If the string
# isn't utf-8, we fall back to iso-8859-1 for all other
# encodings. (See PR #3538)
try:
reason = self.reason.decode('utf-8')
except UnicodeDecodeError:
reason = self.reason.decode('iso-8859-1')
else:
reason = self.reason
if 400 <= self.status_code < 500:
http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url)
elif 500 <= self.status_code < 600:
http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
if not self._content_consumed:
self.raw.close()
release_conn = getattr(self.raw, 'release_conn', None)
if release_conn is not None:
release_conn()
| gpl-3.0 |
cbanta/pjproject | tests/cdash/main.py | 107 | 1229 | #!/bin/env python
#
# main.py - main entry for PJSIP's CDash tests
#
# Copyright (C) 2008-2009 Teluu Inc. (http://www.teluu.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import sys
# Require at least the configuration-module name on the command line.
if len(sys.argv) == 1:
    # Parenthesized print runs identically on Python 2 and Python 3,
    # unlike the former bare `print "..."` statements (py2-only syntax).
    print("Usage: main.py cfg_file [cfg_site]")
    print("Example:")
    print(" main.py cfg_gnu")
    print(" main.py cfg_gnu custom_cfg_site")
    sys.exit(1)

# Keep only the arguments after the config-module name (drop the script
# name and the cfg_file argument); these are forwarded to the builders.
args = []
args.extend(sys.argv)
args.remove(args[1])
args.remove(args[0])

# Import the named configuration module (e.g. "cfg_gnu"), create its
# builders and run them in sequence.
cfg_file = __import__(sys.argv[1])
builders = cfg_file.create_builder(args)
for builder in builders:
    builder.execute()
| gpl-2.0 |
mahendra-r/edx-platform | lms/djangoapps/teams/search_indexes.py | 12 | 4613 | """ Search index used to load data into elasticsearch"""
import logging
from elasticsearch.exceptions import ConnectionError
from django.conf import settings
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from django.utils import translation
from functools import wraps
from search.search_engine_base import SearchEngine
from .errors import ElasticSearchConnectionError
from .serializers import CourseTeamSerializer, CourseTeam
def if_search_enabled(f):
    """
    Decorator that short-circuits `f` unless search is enabled.

    The wrapped callable is expected to receive, as its first positional
    argument, an object exposing ``search_is_enabled()`` (here, the
    CourseTeamIndexer class). When that check is False the call is skipped
    and None is returned.
    """
    @wraps(f)
    def guarded(*args, **kwargs):
        """Invoke ``f`` only when its class reports search enabled."""
        indexer_cls = args[0]
        if not indexer_cls.search_is_enabled():
            return None
        return f(*args, **kwargs)
    return guarded
class CourseTeamIndexer(object):
    """
    This is the index object for searching and storing CourseTeam model instances.
    """
    # Elasticsearch index / document-type names and the settings.FEATURES
    # flag that gates all indexing operations.
    INDEX_NAME = "course_team_index"
    DOCUMENT_TYPE_NAME = "course_team"
    ENABLE_SEARCH_KEY = "ENABLE_TEAMS"

    def __init__(self, course_team):
        # The CourseTeam model instance being indexed.
        self.course_team = course_team

    def data(self):
        """
        Uses the CourseTeamSerializer to create a serialized course_team object.
        Adds in additional text and pk fields.
        Removes membership relation.

        Returns serialized object with additional search fields.
        """
        serialized_course_team = CourseTeamSerializer(self.course_team).data
        # Save the primary key so we can load the full objects easily after we search
        serialized_course_team['pk'] = self.course_team.pk
        # Don't save the membership relations in elasticsearch
        serialized_course_team.pop('membership', None)

        # add generally searchable content
        serialized_course_team['content'] = {
            'text': self.content_text()
        }

        return serialized_course_team

    def content_text(self):
        """
        Generate the text field used for general search.
        """
        # Always use the English version of any localizable strings (see TNL-3239)
        with translation.override('en'):
            return u"{name}\n{description}\n{country}\n{language}".format(
                name=self.course_team.name,
                description=self.course_team.description,
                country=self.course_team.country.name.format(),
                language=self._language_name()
            )

    def _language_name(self):
        """
        Convert the language from code to long name.
        """
        languages = dict(settings.ALL_LANGUAGES)
        try:
            return languages[self.course_team.language]
        except KeyError:
            # Unknown code: fall back to the raw language code itself.
            return self.course_team.language

    @classmethod
    @if_search_enabled
    def index(cls, course_team):
        """
        Update index with course_team object (if feature is enabled).
        """
        search_engine = cls.engine()
        serialized_course_team = CourseTeamIndexer(course_team).data()
        search_engine.index(cls.DOCUMENT_TYPE_NAME, [serialized_course_team])

    @classmethod
    @if_search_enabled
    def remove(cls, course_team):
        """
        Remove course_team from the index (if feature is enabled).
        """
        cls.engine().remove(cls.DOCUMENT_TYPE_NAME, [course_team.team_id])

    @classmethod
    @if_search_enabled
    def engine(cls):
        """
        Return course team search engine (if feature is enabled).

        :raises ElasticSearchConnectionError: when elasticsearch is
            unreachable (the underlying ConnectionError is logged).
        """
        try:
            return SearchEngine.get_search_engine(index=cls.INDEX_NAME)
        except ConnectionError as err:
            logging.error('Error connecting to elasticsearch: %s', err)
            raise ElasticSearchConnectionError

    @classmethod
    def search_is_enabled(cls):
        """
        Return boolean of whether course team indexing is enabled.
        """
        return settings.FEATURES.get(cls.ENABLE_SEARCH_KEY, False)
@receiver(post_save, sender=CourseTeam, dispatch_uid='teams.signals.course_team_post_save_callback')
def course_team_post_save_callback(**kwargs):
    """
    Reindex object after save.
    """
    try:
        CourseTeamIndexer.index(kwargs['instance'])
    except ElasticSearchConnectionError:
        # Best-effort: a search-backend outage must not block the save.
        pass
@receiver(post_delete, sender=CourseTeam, dispatch_uid='teams.signals.course_team_post_delete_callback')
def course_team_post_delete_callback(**kwargs):  # pylint: disable=invalid-name
    """
    Reindex object after delete.
    """
    try:
        CourseTeamIndexer.remove(kwargs['instance'])
    except ElasticSearchConnectionError:
        # Best-effort: a search-backend outage must not block the delete.
        pass
| agpl-3.0 |
kvar/ansible | lib/ansible/plugins/action/wait_for_connection.py | 1 | 4844 | # (c) 2017, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# CI-required python3 boilerplate
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import time
from datetime import datetime, timedelta
from ansible.module_utils._text import to_text
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
display = Display()
class TimedOutException(Exception):
    """Raised when the connection did not come up within the timeout."""
    pass
class ActionModule(ActionBase):
    """Action plugin for ``wait_for_connection``: block until the target
    host's transport is up and it answers a ping/win_ping round-trip.
    """

    TRANSFERS_FILES = False
    _VALID_ARGS = frozenset(('connect_timeout', 'delay', 'sleep', 'timeout'))

    # Defaults mirror the documented module parameters (seconds).
    DEFAULT_CONNECT_TIMEOUT = 5
    DEFAULT_DELAY = 0
    DEFAULT_SLEEP = 1
    DEFAULT_TIMEOUT = 600

    def do_until_success_or_timeout(self, what, timeout, connect_timeout, what_desc, sleep=1):
        """Retry ``what(connect_timeout)`` until it stops raising or ``timeout`` elapses.

        :param what: one-argument callable performing the check; success
            means "returned without raising".
        :param timeout: overall retry budget in seconds.
        :param connect_timeout: per-attempt timeout handed through to ``what``.
        :param what_desc: description used in debug output and the timeout error.
        :param sleep: pause between attempts, in seconds.
        :raises TimedOutException: if no attempt succeeded within ``timeout``.
        """
        max_end_time = datetime.utcnow() + timedelta(seconds=timeout)

        # Pre-initialize the stored failure. Previously only the unused
        # name `e` was initialized, so a non-positive timeout (loop body
        # never entered) raised NameError on `error` instead of the
        # intended TimedOutException.
        error = None
        while datetime.utcnow() < max_end_time:
            try:
                what(connect_timeout)
                if what_desc:
                    display.debug("wait_for_connection: %s success" % what_desc)
                return
            except Exception as e:
                error = e  # PY3 compatibility to store exception for use outside of this block
                if what_desc:
                    display.debug("wait_for_connection: %s fail (expected), retrying in %d seconds..." % (what_desc, sleep))
                time.sleep(sleep)

        raise TimedOutException("timed out waiting for %s: %s" % (what_desc, error))

    def run(self, tmp=None, task_vars=None):
        """Entry point called by the task executor.

        Optionally waits ``delay`` seconds, then polls a raw transport test
        (when the connection provides one) followed by a ping-module
        round-trip until success or ``timeout``. Returns the standard
        result dict with ``elapsed`` seconds, plus ``failed``/``msg`` on
        timeout, or ``skipped`` in check mode.
        """
        if task_vars is None:
            task_vars = dict()

        connect_timeout = int(self._task.args.get('connect_timeout', self.DEFAULT_CONNECT_TIMEOUT))
        delay = int(self._task.args.get('delay', self.DEFAULT_DELAY))
        sleep = int(self._task.args.get('sleep', self.DEFAULT_SLEEP))
        timeout = int(self._task.args.get('timeout', self.DEFAULT_TIMEOUT))

        if self._play_context.check_mode:
            display.vvv("wait_for_connection: skipping for check_mode")
            return dict(skipped=True)

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        def ping_module_test(connect_timeout):
            ''' Test ping module, if available '''
            display.vvv("wait_for_connection: attempting ping module test")
            # re-run interpreter discovery if we ran it in the first iteration
            if self._discovered_interpreter_key:
                task_vars['ansible_facts'].pop(self._discovered_interpreter_key, None)
            # call connection reset between runs if it's there
            try:
                self._connection.reset()
            except AttributeError:
                pass

            # Use win_ping on winrm/powershell, else use ping
            if getattr(self._connection._shell, "_IS_WINDOWS", False):
                ping_result = self._execute_module(module_name='win_ping', module_args=dict(), task_vars=task_vars)
            else:
                ping_result = self._execute_module(module_name='ping', module_args=dict(), task_vars=task_vars)

            # Test module output
            if ping_result['ping'] != 'pong':
                raise Exception('ping test failed')

        start = datetime.now()

        if delay:
            time.sleep(delay)

        try:
            # If the connection has a transport_test method, use it first
            if hasattr(self._connection, 'transport_test'):
                self.do_until_success_or_timeout(self._connection.transport_test, timeout, connect_timeout, what_desc="connection port up", sleep=sleep)

            # Use the ping module test to determine end-to-end connectivity
            self.do_until_success_or_timeout(ping_module_test, timeout, connect_timeout, what_desc="ping module test success", sleep=sleep)
        except TimedOutException as e:
            result['failed'] = True
            result['msg'] = to_text(e)

        elapsed = datetime.now() - start
        result['elapsed'] = elapsed.seconds

        # remove a temporary path we created
        self._remove_tmp_path(self._connection._shell.tmpdir)

        return result
| gpl-3.0 |
jack-pappas/PyTables | doc/sphinxext/numpydoc.py | 28 | 5688 | """
========
numpydoc
========
Sphinx extension that handles docstrings in the Numpy standard format. [1]
It will:
- Convert Parameters etc. sections to field lists.
- Convert See Also section to a See also entry.
- Renumber references.
- Extract the signature from the docstring, if it can't be determined otherwise.
.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard
"""
import sphinx
if sphinx.__version__ < '1.0.1':
raise RuntimeError("Sphinx 1.0.1 or newer is required")
import os, re, pydoc
from docscrape_sphinx import get_doc_object, SphinxDocString
from sphinx.util.compat import Directive
import inspect
def mangle_docstrings(app, what, name, obj, options, lines,
                      reference_offset=[0]):
    """autodoc-process-docstring hook: rewrite a numpydoc docstring in
    place (mutating `lines`).

    The mutable default ``reference_offset=[0]`` is deliberate: it persists
    across calls so renumbered reference labels stay unique build-wide.
    NOTE: this module is Python 2 only (``ur''`` literals, ``unicode``).
    """
    cfg = dict(use_plots=app.config.numpydoc_use_plots,
               show_class_members=app.config.numpydoc_show_class_members)
    if what == 'module':
        # Strip top title
        title_re = re.compile(ur'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*',
                              re.I|re.S)
        lines[:] = title_re.sub(u'', u"\n".join(lines)).split(u"\n")
    else:
        doc = get_doc_object(obj, what, u"\n".join(lines), config=cfg)
        lines[:] = unicode(doc).split(u"\n")
    if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \
           obj.__name__:
        if hasattr(obj, '__module__'):
            v = dict(full_name=u"%s.%s" % (obj.__module__, obj.__name__))
        else:
            v = dict(full_name=obj.__name__)
        lines += [u'', u'.. htmlonly::', '']
        lines += [u' %s' % x for x in
                  (app.config.numpydoc_edit_link % v).split("\n")]
    # replace reference numbers so that there are no duplicates
    references = []
    for line in lines:
        line = line.strip()
        # NOTE(review): the character class has no `+`, so only
        # single-character reference labels match -- confirm intended.
        m = re.match(ur'^.. \[([a-z0-9_.-])\]', line, re.I)
        if m:
            references.append(m.group(1))
    # start renaming from the longest string, to avoid overwriting parts
    references.sort(key=lambda x: -len(x))
    if references:
        for i, line in enumerate(lines):
            for r in references:
                if re.match(ur'^\d+$', r):
                    # Numeric labels become R<n> with the global offset.
                    new_r = u"R%d" % (reference_offset[0] + int(r))
                else:
                    new_r = u"%s%d" % (r, reference_offset[0])
                lines[i] = lines[i].replace(u'[%s]_' % r,
                                            u'[%s]_' % new_r)
                lines[i] = lines[i].replace(u'.. [%s]' % r,
                                            u'.. [%s]' % new_r)
    reference_offset[0] += len(references)
def mangle_signature(app, what, name, obj, options, sig, retann):
    """autodoc-process-signature hook: extract the signature from the
    docstring's Signature section when numpydoc documents one explicitly.
    Returns (signature, return_annotation) or None to keep the default.
    """
    # Do not try to inspect classes that don't define `__init__`
    if (inspect.isclass(obj) and
        (not hasattr(obj, '__init__') or
        'initializes x; see ' in pydoc.getdoc(obj.__init__))):
        return '', ''
    if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): return
    if not hasattr(obj, '__doc__'): return
    doc = SphinxDocString(pydoc.getdoc(obj))
    if doc['Signature']:
        # Drop everything before the opening parenthesis.
        sig = re.sub(u"^[^(]*", u"", doc['Signature'])
        return sig, u''
def setup(app, get_doc_object_=get_doc_object):
    """Sphinx extension entry point: register the docstring/signature
    hooks, the extension's config values and the mangling domains."""
    # Allow callers to substitute their own doc-object factory.
    global get_doc_object
    get_doc_object = get_doc_object_

    app.connect('autodoc-process-docstring', mangle_docstrings)
    app.connect('autodoc-process-signature', mangle_signature)
    app.add_config_value('numpydoc_edit_link', None, False)
    app.add_config_value('numpydoc_use_plots', None, False)
    app.add_config_value('numpydoc_show_class_members', True, True)

    # Extra mangling domains
    app.add_domain(NumpyPythonDomain)
    app.add_domain(NumpyCDomain)
#------------------------------------------------------------------------------
# Docstring-mangling domains
#------------------------------------------------------------------------------
from docutils.statemachine import ViewList
from sphinx.domains.c import CDomain
from sphinx.domains.python import PythonDomain
class ManglingDomainBase(object):
    """Mixin for Sphinx domains that wraps selected directives so their
    content is run through mangle_docstrings before normal processing."""
    # Maps directive name -> object type passed to wrap_mangling_directive.
    directive_mangling_map = {}

    def __init__(self, *a, **kw):
        super(ManglingDomainBase, self).__init__(*a, **kw)
        self.wrap_mangling_directives()

    def wrap_mangling_directives(self):
        # Replace each mapped directive class with its mangling wrapper.
        for name, objtype in self.directive_mangling_map.items():
            self.directives[name] = wrap_mangling_directive(
                self.directives[name], objtype)
class NumpyPythonDomain(ManglingDomainBase, PythonDomain):
    """Python domain ('np') whose directives mangle numpydoc content."""
    name = 'np'
    directive_mangling_map = {
        'function': 'function',
        'class': 'class',
        'exception': 'class',
        'method': 'function',
        'classmethod': 'function',
        'staticmethod': 'function',
        'attribute': 'attribute',
    }
class NumpyCDomain(ManglingDomainBase, CDomain):
    """C domain ('np-c') whose directives mangle numpydoc content."""
    name = 'np-c'
    directive_mangling_map = {
        'function': 'function',
        'member': 'attribute',
        'macro': 'function',
        'type': 'class',
        'var': 'object',
    }
def wrap_mangling_directive(base_directive, objtype):
    """Return a subclass of `base_directive` whose content is passed
    through mangle_docstrings (as object type `objtype`) before the base
    directive processes it."""
    class directive(base_directive):
        def run(self):
            env = self.state.document.settings.env
            name = None
            if self.arguments:
                # Split "<prefix> <name>(<sig>)" and keep just the name.
                m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0])
                name = m.group(2).strip()
            if not name:
                # NOTE(review): with no arguments this indexes into an
                # empty list -- presumably directives here always have one.
                name = self.arguments[0]
            lines = list(self.content)
            mangle_docstrings(env.app, objtype, name, None, None, lines)
            self.content = ViewList(lines, self.content.parent)
            return base_directive.run(self)
    return directive
| bsd-3-clause |
alangwansui/mtl_ordercenter | openerp/addons/account_check_writing/report/__init__.py | 446 | 1066 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import check_print
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mozilla/fjord | vendor/src/html5lib-python/html5lib/tests/test_parser.py | 4 | 3612 | from __future__ import absolute_import, division, unicode_literals
import os
import sys
import traceback
import warnings
import re
warnings.simplefilter('error')
from .support import get_data_files
from .support import TestData, convert, convertExpected, treeTypes
from html5lib import html5parser, constants
# Run the parse error checks
checkParseErrors = False
# XXX - There should just be one function here but for some reason the testcase
# format differs from the treedump format by a single space character
def convertTreeDump(data):
    """Convert a tree dump to the testcase format by dropping the first
    line of `convert(3)`'s output (the formats differ by that line)."""
    return '\n'.join(convert(3)(data).split('\n')[1:])
namespaceExpected = re.compile(r'^(\s*)<(\S+)>', re.M).sub
def runParserTest(innerHTML, input, expected, errors, treeClass,
                  namespaceHTMLElements):
    """Parse `input` (as a fragment when `innerHTML` is set) with the given
    tree builder and assert the serialized tree matches `expected`;
    optionally compare reported parse errors against `errors`."""
    with warnings.catch_warnings(record=True) as caughtWarnings:
        warnings.simplefilter('always')
        p = html5parser.HTMLParser(tree=treeClass,
                                   namespaceHTMLElements=namespaceHTMLElements)
        try:
            if innerHTML:
                document = p.parseFragment(input, innerHTML)
            else:
                document = p.parse(input)
        except:
            errorMsg = '\n'.join(['\n\nInput:', input, '\nExpected:', expected,
                                  '\nTraceback:', traceback.format_exc()])
            assert False, errorMsg
    # Only DataLossWarning is tolerated; anything else fails the test.
    otherWarnings = [x for x in caughtWarnings
                     if not issubclass(x.category, constants.DataLossWarning)]
    assert len(otherWarnings) == 0, [(x.category, x.message) for x in otherWarnings]
    # Data-loss warnings mean the tree builder cannot represent the input
    # faithfully, so the comparison below would be meaningless: skip it.
    if len(caughtWarnings):
        return
    output = convertTreeDump(p.tree.testSerializer(document))
    expected = convertExpected(expected)
    if namespaceHTMLElements:
        # Inject the expected html namespace into every element line.
        expected = namespaceExpected(r'\1<html \2>', expected)
    errorMsg = '\n'.join(['\n\nInput:', input, '\nExpected:', expected,
                          '\nReceived:', output])
    assert expected == output, errorMsg
    errStr = []
    for (line, col), errorcode, datavars in p.errors:
        assert isinstance(datavars, dict), '%s, %s' % (errorcode, repr(datavars))
        errStr.append('Line: %i Col: %i %s' % (line, col,
                                               constants.E[errorcode] % datavars))
    errorMsg2 = '\n'.join(['\n\nInput:', input,
                           '\nExpected errors (' + str(len(errors)) + '):\n' + '\n'.join(errors),
                           '\nActual errors (' + str(len(p.errors)) + '):\n' + '\n'.join(errStr)])
    # Error-count comparison is gated by the module flag (off by default).
    if checkParseErrors:
        assert len(p.errors) == len(errors), errorMsg2
def test_parser():
    """Nose-style test generator: yield one runParserTest invocation per
    (test vector, tree builder, namespacing mode) combination."""
    sys.stderr.write('Testing tree builders ' + ' '.join(list(treeTypes.keys())) + '\n')
    files = get_data_files('tree-construction')
    for filename in files:
        testName = os.path.basename(filename).replace('.dat', '')
        # The template-parsing vectors are not supported here.
        if testName in ('template',):
            continue
        tests = TestData(filename, 'data')
        for index, test in enumerate(tests):
            input, errors, innerHTML, expected = [test[key] for key in
                                                  ('data', 'errors',
                                                   'document-fragment',
                                                   'document')]
            if errors:
                errors = errors.split('\n')
            for treeName, treeCls in treeTypes.items():
                # Run each vector both with and without HTML namespacing.
                for namespaceHTMLElements in (True, False):
                    yield (runParserTest, innerHTML, input, expected, errors, treeCls,
                           namespaceHTMLElements)
| bsd-3-clause |
christoph-buente/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/newstringio.py | 132 | 1757 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""'with'-compliant StringIO implementation."""
import StringIO as OldStringIO
class StringIO(OldStringIO.StringIO):
    """StringIO subclass usable as a `with` context manager (Python 2's
    StringIO.StringIO predates the context-manager protocol)."""
    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Nothing to release; exceptions propagate (implicitly returns None).
        pass
| bsd-3-clause |
theinric/rsyslog | plugins/external/solr/rsyslog_solr.py | 12 | 3972 | #! /usr/bin/python
"""A simple plugin to connect rsyslog to SOLR
Based on Radu Gheorghe's idea as expressed in
http://blog.sematext.com/2013/12/16/video-using-solr-for-logs-with-rsyslog-flume-fluentd-and-logstash/
Watch out for slide 26.
Copyright (C) 2014 by Adiscon GmbH
This file is part of rsyslog.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
-or-
see COPYING.ASL20 in the source distribution
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import select
import httplib
import socket
import time
# skeleton config parameters
pollPeriod = 0.75 # the number of seconds between polling for new messages
maxAtOnce = 5000 # max nbr of messages that are processed within one batch
retryInterval = 5
numberOfRetries = 10
errorFile = open("/var/log/rsyslog_solr_oopsies.log", "a")
# App logic global variables
solrServer = "localhost"
solrPort = 8983
solrUpdatePath = "/solr/gettingstarted/update"
solrConnection = "" # HTTP connection to solr
def onInit():
    """ Do everything that is needed to initialize processing (e.g.
    open files, create handles, connect to systems...)

    Opens the module-level HTTP connection to the configured Solr host.
    """
    global solrConnection
    solrConnection = httplib.HTTPConnection(solrServer, solrPort)
def onReceive(msgs):
    """This is the entry point where actual work needs to be done. It receives
    a list with all messages pulled from rsyslog. The list is of variable
    length, but contains all messages that are currently available. It is
    suggest NOT to use any further buffering, as we do not know when the
    next message will arrive. It may be in a nanosecond from now, but it
    may also be in three hours...

    Here `msgs` is a JSON-array string POSTed to Solr's update endpoint.
    NOTE: Python 2 only (the `<>` operator below is a py3 syntax error).
    """
    solrConnection.request("POST", solrUpdatePath, msgs, {"Content-type": "application/json"})
    response = solrConnection.getresponse()
    response.read() # apparently we have to read the whole reply to reuse the connection
    if (response.status <> 200):
        # if there's something wrong with the payload, like a schema mismatch
        # write batch to error file and move on. Normally there's no point in retrying here
        errorFile.write("%s\n" % msgs)
def onExit():
    """ Do everything that is needed to finish processing (e.g.
    close files, handles, disconnect from systems...). This is
    being called immediately before exiting.
    """
    solrConnection.close()
    errorFile.close()
# --- main loop: batch stdin lines and ship them to Solr --------------------
onInit()
keepRunning = 1
while keepRunning == 1:
    # Wait up to pollPeriod seconds for rsyslog to push new messages.
    while keepRunning and sys.stdin in select.select([sys.stdin], [], [], pollPeriod)[0]:
        msgs = "["  # Solr update payload: a JSON array of documents
        msgsInBatch = 0
        # Drain everything currently queued on stdin (non-blocking poll).
        while keepRunning and sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
            line = sys.stdin.readline()
            if line:
                # Each input line is already a JSON document; append it
                # followed by a separating comma.
                msgs += line
                msgs += ","
            else:  # an empty line means stdin has been closed
                keepRunning = 0
            msgsInBatch = msgsInBatch + 1
            if msgsInBatch >= maxAtOnce:
                break
        # Only ship batches that actually contain documents. The previous
        # check (len(msgs) > 0) was always true because msgs starts as
        # "[", so an empty drain after EOF posted the invalid body "]".
        if len(msgs) > 1:
            retries = 0
            while retries < numberOfRetries:
                try:
                    # Drop the trailing comma and close the JSON array.
                    onReceive(msgs[:-1] + "]")
                    break
                except socket.error:
                    # Connection-level failure: back off, then retry.
                    time.sleep(retryInterval)
                    retries += 1
            # All retries exhausted -- persist the batch for later replay.
            if retries == numberOfRetries:
                errorFile.write("%s\n" % msgs)
    sys.stdout.flush()  # very important, Python buffers far too much!
onExit()
| gpl-3.0 |
rafaelang/tinyids | src/TinyIDS/backends/binmeta.py | 1 | 1635 | # -*- coding: utf-8 -*-
#
# This file is part of TinyIDS.
#
# TinyIDS is a distributed Intrusion Detection System (IDS) for Unix systems.
#
# Project development web site:
#
# http://www.codetrax.org/projects/tinyids
#
# Copyright (c) 2010 George Notaras, G-Loaded.eu, CodeTRAX.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
from TinyIDS.collector import BaseCollector
# Glob patterns covering the system binaries and libraries whose stat()
# metadata is collected by CollectorBackend.collect() below.
DEFAULT_GLOB_EXP = (
    '/usr/local/sbin/*',
    '/usr/local/bin/*',
    '/sbin/*',
    '/bin/*',
    '/usr/sbin/*',
    '/usr/bin/*',
    '/root/bin/*',
    '/lib/*',
    '/usr/lib/*',
    '/usr/local/lib/*',
)
class CollectorBackend(BaseCollector):
    """TinyIDS backend that reports stat() metadata for system binaries.

    For every path matched by DEFAULT_GLOB_EXP it yields one line of the
    form: ``<path> <mode> <inode> <uid> <gid> <size> <mtime>``.
    """
    name = __name__

    def collect(self):
        """Yield one stat-metadata line per matched filesystem path."""
        for path in self.file_paths(DEFAULT_GLOB_EXP):
            st = os.stat(path)
            fields = (path, st.st_mode, st.st_ino, st.st_uid,
                      st.st_gid, st.st_size, st.st_mtime)
            yield '%s %s %s %s %s %s %s\n' % fields
if __name__ == '__main__':
    # Manual smoke test: dump the collected metadata lines to stdout.
    for data in CollectorBackend().collect():
        sys.stdout.write(data)
        sys.stdout.flush()
| apache-2.0 |
cthecheese/Playground | customslider/node_modules/node-gyp/gyp/tools/pretty_vcproj.py | 2637 | 9586 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Make the format of a vcproj really pretty.
This script normalize and sort an xml. It also fetches all the properties
inside linked vsprops and include them explicitly in the vcproj.
It outputs the resulting xml to stdout.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import sys
from xml.dom.minidom import parse
from xml.dom.minidom import Node
# Text substitutions (key=value pairs parsed from the command line by main)
# applied to every path before it is made absolute; see FixFilenames().
REPLACEMENTS = dict()
# Set to sys.argv by main(); ARGUMENTS[1] is the path of the vcproj file.
ARGUMENTS = None
class CmpTuple(object):
  """Comparator ordering 2-tuples by their first element."""
  def __call__(self, lhs, rhs):
    # Only the first element (the attribute name) matters for ordering.
    return cmp(lhs[0], rhs[0])
class CmpNode(object):
  """Comparator ordering xml nodes by a canonical string key."""
  def __call__(self, x, y):
    def node_key(node):
      # Build a stable textual key: tag, value, Name attribute, then
      # every (name, value) attribute pair sorted by attribute name.
      pieces = ["node", node.nodeName]
      if node.nodeValue:
        pieces.append(node.nodeValue)
      if node.attributes:
        # We first sort by name, if present.
        pieces.append(node.getAttribute("Name"))
        pairs = []
        for (name, value) in node.attributes.items():
          pairs.append((name, value))
        pairs.sort(CmpTuple())
        for (name, value) in pairs:
          pieces.append(name)
          pieces.append(value)
      return ''.join(pieces)
    return cmp(node_key(x), node_key(y))
def PrettyPrintNode(node, indent=0):
  """Print `node` and its subtree to stdout as indented XML.

  Attributes are emitted one per line, sorted by name with CmpTuple;
  whitespace-only text nodes are skipped.
  """
  if node.nodeType == Node.TEXT_NODE:
    if node.data.strip():
      print '%s%s' % (' '*indent, node.data.strip())
    return
  if node.childNodes:
    node.normalize()
  # Get the number of attributes
  attr_count = 0
  if node.attributes:
    attr_count = node.attributes.length
  # Print the main tag
  if attr_count == 0:
    print '%s<%s>' % (' '*indent, node.nodeName)
  else:
    print '%s<%s' % (' '*indent, node.nodeName)
    all_attributes = []
    for (name, value) in node.attributes.items():
      all_attributes.append((name, value))
    all_attributes.sort(CmpTuple())
    for (name, value) in all_attributes:
      print '%s %s="%s"' % (' '*indent, name, value)
    print '%s>' % (' '*indent)
  if node.nodeValue:
    print '%s %s' % (' '*indent, node.nodeValue)
  for sub_node in node.childNodes:
    # Children are indented two extra spaces per nesting level.
    PrettyPrintNode(sub_node, indent=indent+2)
  print '%s</%s>' % (' '*indent, node.nodeName)
def FlattenFilter(node):
  """Return a flat list of this node's children, expanding 'Filter' nodes.

  A filter named '_excluded_files' (and everything under it) is dropped.
  """
  if node.attributes and node.getAttribute('Name') == '_excluded_files':
    # We don't add the "_excluded_files" filter.
    return []
  flattened = []
  for child in node.childNodes:
    if child.nodeName == 'Filter':
      flattened += FlattenFilter(child)
    else:
      flattened.append(child)
  return flattened
def FixFilenames(filenames, current_directory):
  """Apply REPLACEMENTS and absolutize paths relative to current_directory.

  Empty entries are dropped; '$'-prefixed macros are kept as-is.
  """
  fixed = []
  for name in filenames:
    if not name:
      continue
    for key, replacement in REPLACEMENTS.items():
      name = name.replace(key, replacement)
    # abspath() resolves against the process cwd, so point it at the
    # directory these paths are relative to first.
    os.chdir(current_directory)
    name = name.strip('"\' ')
    if name.startswith('$'):
      fixed.append(name)
    else:
      fixed.append(os.path.abspath(name))
  return fixed
def AbsoluteNode(node):
  """Makes all the path-like attributes we know about in this node absolute."""
  path_attributes = ('InheritedPropertySheets', 'RelativePath',
                     'AdditionalIncludeDirectories',
                     'IntermediateDirectory', 'OutputDirectory',
                     'AdditionalLibraryDirectories')
  if not node.attributes:
    return
  for (name, value) in node.attributes.items():
    if name in path_attributes:
      # These attributes hold ';'-separated path lists; absolutize each
      # entry relative to the vcproj's own directory.
      fixed = FixFilenames(value.split(';'), os.path.dirname(ARGUMENTS[1]))
      node.setAttribute(name, ';'.join(fixed))
    # Empty attributes are just noise; drop them.
    if not value:
      node.removeAttribute(name)
def CleanupVcproj(node):
  """Normalize `node` in place, recursing into every child first.

  Steps: absolutize paths, strip CR/LF from text nodes, sort and
  de-duplicate ';'-separated attribute values, flatten 'Filter' children,
  then re-insert the children in canonical (sorted) order, dropping empty
  Tool nodes and UserMacro nodes.
  """
  for sub_node in node.childNodes:
    AbsoluteNode(sub_node)
    CleanupVcproj(sub_node)
  # Normalize the node, and remove all extranous whitespaces.
  for sub_node in node.childNodes:
    if sub_node.nodeType == Node.TEXT_NODE:
      sub_node.data = sub_node.data.replace("\r", "")
      sub_node.data = sub_node.data.replace("\n", "")
      sub_node.data = sub_node.data.rstrip()
  # Fix all the semicolon separated attributes to be sorted, and we also
  # remove the dups.
  if node.attributes:
    for (name, value) in node.attributes.items():
      sorted_list = sorted(value.split(';'))
      unique_list = []
      for i in sorted_list:
        if not unique_list.count(i):
          unique_list.append(i)
      node.setAttribute(name, ';'.join(unique_list))
      # NOTE(review): this tests the pre-sort value, so only attributes
      # that were already empty are removed here.
      if not value:
        node.removeAttribute(name)
  if node.childNodes:
    node.normalize()
  # For each node, take a copy, and remove it from the list.
  node_array = []
  while node.childNodes and node.childNodes[0]:
    # Take a copy of the node and remove it from the list.
    current = node.childNodes[0]
    node.removeChild(current)
    # If the child is a filter, we want to append all its children
    # to this same list.
    if current.nodeName == 'Filter':
      node_array.extend(FlattenFilter(current))
    else:
      node_array.append(current)
  # Sort the list.
  node_array.sort(CmpNode())
  # Insert the nodes in the correct order.
  for new_node in node_array:
    # But don't append empty tool node.
    if new_node.nodeName == 'Tool':
      if new_node.attributes and new_node.attributes.length == 1:
        # This one was empty.
        continue
    if new_node.nodeName == 'UserMacro':
      continue
    node.appendChild(new_node)
def GetConfiguationNodes(vcproj):
  """Collect every <Configuration> element under <Configurations>."""
  # TODO(nsylvain): Find a better way to navigate the xml.
  return [conf
          for section in vcproj.childNodes
          if section.nodeName == "Configurations"
          for conf in section.childNodes
          if conf.nodeName == "Configuration"]
def GetChildrenVsprops(filename):
  """Return the absolute paths of the vsprops inherited by `filename`."""
  root = parse(filename).documentElement
  if not root.attributes:
    return []
  inherited = root.getAttribute('InheritedPropertySheets')
  return FixFilenames(inherited.split(';'), os.path.dirname(filename))
def SeekToNode(node1, child2):
  """Find the child of node1 matching child2's tag and Name attribute.

  Returns None when child2 is a text node, has no Name attribute, or no
  matching child exists.
  """
  # A text node carries no attributes, so there is nothing to match on.
  if child2.nodeType == Node.TEXT_NODE:
    return None
  wanted_name = child2.getAttribute("Name")
  if not wanted_name:
    # Without a name we don't know how to merge.
    return None
  for candidate in node1.childNodes:
    if (candidate.nodeName == child2.nodeName and
        candidate.getAttribute("Name") == wanted_name):
      return candidate
  # No match. We give up.
  return None
def MergeAttributes(node1, node2):
  """Merge node2's attributes into node1, skipping 'Name'.

  Differing values are concatenated with ';'; InheritedPropertySheets
  references are dropped from node1 afterwards.
  """
  # No attributes to merge?
  if not node2.attributes:
    return
  for attr_name, incoming in node2.attributes.items():
    # Don't merge the 'Name' attribute.
    if attr_name == 'Name':
      continue
    existing = node1.getAttribute(attr_name)
    if not existing:
      # Absent on the target: copy it over verbatim.
      node1.setAttribute(attr_name, incoming)
    elif existing != incoming:
      # Present but different: keep both values, ';'-separated.
      node1.setAttribute(attr_name, ';'.join([existing, incoming]))
    # Property-sheet references are useless once merged; drop them.
    if attr_name == 'InheritedPropertySheets':
      node1.removeAttribute(attr_name)
def MergeProperties(node1, node2):
  """Recursively fold node2's attributes and children into node1."""
  MergeAttributes(node1, node2)
  for incoming in node2.childNodes:
    target = SeekToNode(node1, incoming)
    if target is None:
      # No counterpart in node1: graft a deep copy of the subtree.
      node1.appendChild(incoming.cloneNode(True))
    else:
      MergeProperties(target, incoming)
def main(argv):
  """Main function of this vcproj prettifier.

  argv[1] is the vcproj path; argv[2:] are key=value replacement pairs.
  Returns 0 on success, 1 on bad usage.
  """
  global ARGUMENTS
  ARGUMENTS = argv
  # We need at least the vcproj file path.
  if len(argv) < 2:
    print ('Usage: %s "c:\\path\\to\\vcproj.vcproj" [key1=value1] '
           '[key2=value2]' % argv[0])
    return 1
  # Parse the keys
  for i in range(2, len(argv)):
    (key, value) = argv[i].split('=')
    REPLACEMENTS[key] = value
  # Open the vcproj and parse the xml.
  dom = parse(argv[1])
  # First thing we need to do is find the Configuration Node and merge them
  # with the vsprops they include.
  for configuration_node in GetConfiguationNodes(dom.documentElement):
    # Get the property sheets associated with this configuration.
    vsprops = configuration_node.getAttribute('InheritedPropertySheets')
    # Fix the filenames to be absolute.
    vsprops_list = FixFilenames(vsprops.strip().split(';'),
                                os.path.dirname(argv[1]))
    # Extend the list of vsprops with all vsprops contained in the current
    # vsprops.
    for current_vsprops in vsprops_list:
      vsprops_list.extend(GetChildrenVsprops(current_vsprops))
    # Now that we have all the vsprops, we need to merge them.
    for current_vsprops in vsprops_list:
      MergeProperties(configuration_node,
                      parse(current_vsprops).documentElement)
  # Now that everything is merged, we need to cleanup the xml.
  CleanupVcproj(dom.documentElement)
  # Finally, we use the pretty xml function to print the vcproj back to the
  # user.
  #print dom.toprettyxml(newl="\n")
  PrettyPrintNode(dom.documentElement)
  return 0
# Script entry point: the process exit status comes from main().
if __name__ == '__main__':
  sys.exit(main(sys.argv))
| mit |
bitifirefly/edx-platform | common/lib/capa/capa/tests/test_responsetypes.py | 23 | 101030 | # -*- coding: utf-8 -*-
"""
Tests of responsetypes
"""
from cStringIO import StringIO
from datetime import datetime
import json
import os
import pyparsing
import random
import textwrap
import unittest
import zipfile
import mock
from pytz import UTC
import requests
from . import new_loncapa_problem, test_capa_system, load_fixture
import calc
from capa.responsetypes import LoncapaProblemError, \
StudentInputError, ResponseError
from capa.correctmap import CorrectMap
from capa.tests.response_xml_factory import (
AnnotationResponseXMLFactory,
ChoiceResponseXMLFactory,
CodeResponseXMLFactory,
ChoiceTextResponseXMLFactory,
CustomResponseXMLFactory,
FormulaResponseXMLFactory,
ImageResponseXMLFactory,
JavascriptResponseXMLFactory,
MultipleChoiceResponseXMLFactory,
NumericalResponseXMLFactory,
OptionResponseXMLFactory,
SchematicResponseXMLFactory,
StringResponseXMLFactory,
SymbolicResponseXMLFactory,
TrueFalseResponseXMLFactory,
)
from capa.util import convert_files_to_filenames
from capa.xqueue_interface import dateformat
class ResponseTest(unittest.TestCase):
    """Base class for tests of capa responses."""
    # Subclasses set this to the response_xml_factory class that builds
    # problem XML for the response type under test.
    xml_factory_class = None
    # If something is wrong, show it to us.
    maxDiff = None
    def setUp(self):
        """Instantiate the XML factory declared by the subclass."""
        super(ResponseTest, self).setUp()
        if self.xml_factory_class:
            self.xml_factory = self.xml_factory_class()
    def build_problem(self, capa_system=None, **kwargs):
        """Build a loncapa problem from factory-generated XML."""
        xml = self.xml_factory.build_xml(**kwargs)
        return new_loncapa_problem(xml, capa_system=capa_system)
    def assert_grade(self, problem, submission, expected_correctness, msg=None):
        """Grade `submission` against input 1_2_1 and check its correctness."""
        input_dict = {'1_2_1': submission}
        correct_map = problem.grade_answers(input_dict)
        if msg is None:
            self.assertEquals(correct_map.get_correctness('1_2_1'), expected_correctness)
        else:
            self.assertEquals(correct_map.get_correctness('1_2_1'), expected_correctness, msg)
    def assert_answer_format(self, problem):
        """Check that the problem reports a non-None answer for input 1_2_1."""
        answers = problem.get_question_answers()
        self.assertTrue(answers['1_2_1'] is not None)
    def assert_multiple_grade(self, problem, correct_answers, incorrect_answers):
        """Check that each listed submission grades correct/incorrect as given."""
        for input_str in correct_answers:
            result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1')
            self.assertEqual(result, 'correct')
        for input_str in incorrect_answers:
            result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1')
            self.assertEqual(result, 'incorrect')
    def _get_random_number_code(self):
        """Returns code to be used to generate a random result."""
        return "str(random.randint(0, 1e9))"
    def _get_random_number_result(self, seed_value):
        """Returns a result that should be generated using the random_number_code."""
        rand = random.Random(seed_value)
        return str(rand.randint(0, 1e9))
class MultiChoiceResponseTest(ResponseTest):
    """Grading tests for multiple-choice responses."""
    xml_factory_class = MultipleChoiceResponseXMLFactory

    def test_multiple_choice_grade(self):
        """Only the single flagged choice is graded as correct."""
        problem = self.build_problem(choices=[False, True, False])
        # Ensure that we get the expected grades
        expectations = [
            ('choice_0', 'incorrect'),
            ('choice_1', 'correct'),
            ('choice_2', 'incorrect'),
        ]
        for submission, expected in expectations:
            self.assert_grade(problem, submission, expected)

    def test_named_multiple_choice_grade(self):
        """Explicitly named choices grade via their choice_<name> ids."""
        problem = self.build_problem(
            choices=[False, True, False],
            choice_names=["foil_1", "foil_2", "foil_3"],
        )
        # Ensure that we get the expected grades
        expectations = [
            ('choice_foil_1', 'incorrect'),
            ('choice_foil_2', 'correct'),
            ('choice_foil_3', 'incorrect'),
        ]
        for submission, expected in expectations:
            self.assert_grade(problem, submission, expected)
class TrueFalseResponseTest(ResponseTest):
    """Tests for truefalse (checkbox-style) response grading."""
    xml_factory_class = TrueFalseResponseXMLFactory
    def test_true_false_grade(self):
        """All (and only) the correct choices must be selected."""
        problem = self.build_problem(choices=[False, True, True])
        # Check the results
        # Mark correct if and only if ALL (and only) correct choices selected
        self.assert_grade(problem, 'choice_0', 'incorrect')
        self.assert_grade(problem, 'choice_1', 'incorrect')
        self.assert_grade(problem, 'choice_2', 'incorrect')
        self.assert_grade(problem, ['choice_0', 'choice_1', 'choice_2'], 'incorrect')
        self.assert_grade(problem, ['choice_0', 'choice_2'], 'incorrect')
        self.assert_grade(problem, ['choice_0', 'choice_1'], 'incorrect')
        self.assert_grade(problem, ['choice_1', 'choice_2'], 'correct')
        # Invalid choices should be marked incorrect (we have no choice 3)
        self.assert_grade(problem, 'choice_3', 'incorrect')
        self.assert_grade(problem, 'not_a_choice', 'incorrect')
    def test_named_true_false_grade(self):
        """The same grading rules apply when choices carry explicit names."""
        problem = self.build_problem(choices=[False, True, True],
                                     choice_names=['foil_1', 'foil_2', 'foil_3'])
        # Check the results
        # Mark correct if and only if ALL (and only) correct choices selected
        self.assert_grade(problem, 'choice_foil_1', 'incorrect')
        self.assert_grade(problem, 'choice_foil_2', 'incorrect')
        self.assert_grade(problem, 'choice_foil_3', 'incorrect')
        self.assert_grade(problem, ['choice_foil_1', 'choice_foil_2', 'choice_foil_3'], 'incorrect')
        self.assert_grade(problem, ['choice_foil_1', 'choice_foil_3'], 'incorrect')
        self.assert_grade(problem, ['choice_foil_1', 'choice_foil_2'], 'incorrect')
        self.assert_grade(problem, ['choice_foil_2', 'choice_foil_3'], 'correct')
        # Invalid choices should be marked incorrect
        self.assert_grade(problem, 'choice_foil_4', 'incorrect')
        self.assert_grade(problem, 'not_a_choice', 'incorrect')
    def test_single_correct_response(self):
        """A single correct choice may be submitted bare or as a list."""
        problem = self.build_problem(choices=[True, False])
        self.assert_grade(problem, 'choice_0', 'correct')
        self.assert_grade(problem, ['choice_0'], 'correct')
class ImageResponseTest(ResponseTest):
    """Tests for image-map response grading (click coordinates)."""
    xml_factory_class = ImageResponseXMLFactory
    def test_rectangle_grade(self):
        """Clicks inside (or on the border of) the rectangle are correct."""
        # Define a rectangle with corners (10,10) and (20,20)
        problem = self.build_problem(rectangle="(10,10)-(20,20)")
        # Anything inside the rectangle (and along the borders) is correct
        # Everything else is incorrect
        correct_inputs = ["[12,19]", "[10,10]", "[20,20]",
                          "[10,15]", "[20,15]", "[15,10]", "[15,20]"]
        incorrect_inputs = ["[4,6]", "[25,15]", "[15,40]", "[15,4]"]
        self.assert_multiple_grade(problem, correct_inputs, incorrect_inputs)
    def test_multiple_rectangles_grade(self):
        """A click inside any of several rectangles is correct."""
        # Define two rectangles
        rectangle_str = "(10,10)-(20,20);(100,100)-(200,200)"
        # Expect that only points inside the rectangles are marked correct
        problem = self.build_problem(rectangle=rectangle_str)
        correct_inputs = ["[12,19]", "[120, 130]"]
        incorrect_inputs = ["[4,6]", "[25,15]", "[15,40]", "[15,4]",
                            "[50,55]", "[300, 14]", "[120, 400]"]
        self.assert_multiple_grade(problem, correct_inputs, incorrect_inputs)
    def test_region_grade(self):
        """Only clicks inside the polygon region are correct."""
        # Define a triangular region with corners (1,1), (5,10), and (0, 10)
        region_str = "[ [1,1], [5,10], [0,10] ]"
        # Expect that only points inside the triangle are marked correct
        problem = self.build_problem(regions=region_str)
        correct_inputs = ["[2,4]", "[1,3]"]
        incorrect_inputs = ["[0,0]", "[3,5]", "[5,15]", "[30, 12]"]
        self.assert_multiple_grade(problem, correct_inputs, incorrect_inputs)
    def test_multiple_regions_grade(self):
        """A click inside any of several polygon regions is correct."""
        # Define multiple regions that the user can select
        region_str = "[[[10,10], [20,10], [20, 30]], [[100,100], [120,100], [120,150]]]"
        # Expect that only points inside the regions are marked correct
        problem = self.build_problem(regions=region_str)
        correct_inputs = ["[15,12]", "[110,112]"]
        incorrect_inputs = ["[0,0]", "[600,300]"]
        self.assert_multiple_grade(problem, correct_inputs, incorrect_inputs)
    def test_region_and_rectangle_grade(self):
        """Regions and rectangles can be combined; either one satisfies."""
        rectangle_str = "(100,100)-(200,200)"
        region_str = "[[10,10], [20,10], [20, 30]]"
        # Expect that only points inside the rectangle or region are marked correct
        problem = self.build_problem(regions=region_str, rectangle=rectangle_str)
        correct_inputs = ["[13,12]", "[110,112]"]
        incorrect_inputs = ["[0,0]", "[600,300]"]
        self.assert_multiple_grade(problem, correct_inputs, incorrect_inputs)
    def test_show_answer(self):
        """The problem exposes an answer for the image input."""
        rectangle_str = "(100,100)-(200,200)"
        region_str = "[[10,10], [20,10], [20, 30]]"
        problem = self.build_problem(regions=region_str, rectangle=rectangle_str)
        self.assert_answer_format(problem)
class SymbolicResponseTest(ResponseTest):
    """Tests for symbolic math grading (MathML checked via Snuggletex)."""
    xml_factory_class = SymbolicResponseXMLFactory
    def test_grade_single_input_correct(self):
        """Mathematically equivalent submissions grade as correct."""
        problem = self.build_problem(math_display=True, expect="2*x+3*y")
        # Correct answers
        correct_inputs = [
            ('2x+3y', textwrap.dedent("""
                <math xmlns="http://www.w3.org/1998/Math/MathML">
                <mstyle displaystyle="true">
                <mn>2</mn><mo>*</mo><mi>x</mi><mo>+</mo><mn>3</mn><mo>*</mo><mi>y</mi>
                </mstyle></math>"""),
             'snuggletex_2x+3y.xml'),
            ('x+x+3y', textwrap.dedent("""
                <math xmlns="http://www.w3.org/1998/Math/MathML">
                <mstyle displaystyle="true">
                <mi>x</mi><mo>+</mo><mi>x</mi><mo>+</mo><mn>3</mn><mo>*</mo><mi>y</mi>
                </mstyle></math>"""),
             'snuggletex_x+x+3y.xml'),
        ]
        for (input_str, input_mathml, server_fixture) in correct_inputs:
            print "Testing input: {0}".format(input_str)
            server_resp = load_fixture(server_fixture)
            self._assert_symbolic_grade(
                problem, input_str, input_mathml,
                'correct', snuggletex_resp=server_resp
            )
    def test_grade_single_input_incorrect(self):
        """Non-equivalent submissions grade as incorrect."""
        problem = self.build_problem(math_display=True, expect="2*x+3*y")
        # Incorrect answers
        incorrect_inputs = [
            ('0', ''),
            ('4x+3y', textwrap.dedent("""
                <math xmlns="http://www.w3.org/1998/Math/MathML">
                <mstyle displaystyle="true">
                <mn>4</mn><mo>*</mo><mi>x</mi><mo>+</mo><mn>3</mn><mo>*</mo><mi>y</mi>
                </mstyle></math>""")),
        ]
        for (input_str, input_mathml) in incorrect_inputs:
            self._assert_symbolic_grade(problem, input_str, input_mathml, 'incorrect')
    def test_complex_number_grade_correct(self):
        """Matrix/imaginary options: an equivalent matrix expression is correct."""
        problem = self.build_problem(
            math_display=True,
            expect="[[cos(theta),i*sin(theta)],[i*sin(theta),cos(theta)]]",
            options=["matrix", "imaginary"]
        )
        correct_snuggletex = load_fixture('snuggletex_correct.html')
        dynamath_input = load_fixture('dynamath_input.txt')
        student_response = "cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]"
        self._assert_symbolic_grade(
            problem, student_response, dynamath_input,
            'correct',
            snuggletex_resp=correct_snuggletex
        )
    def test_complex_number_grade_incorrect(self):
        """Matrix/imaginary options: a non-equivalent scalar is incorrect."""
        problem = self.build_problem(math_display=True,
                                     expect="[[cos(theta),i*sin(theta)],[i*sin(theta),cos(theta)]]",
                                     options=["matrix", "imaginary"])
        wrong_snuggletex = load_fixture('snuggletex_wrong.html')
        dynamath_input = textwrap.dedent("""
            <math xmlns="http://www.w3.org/1998/Math/MathML">
              <mstyle displaystyle="true"><mn>2</mn></mstyle>
            </math>
        """)
        self._assert_symbolic_grade(
            problem, "2", dynamath_input,
            'incorrect',
            snuggletex_resp=wrong_snuggletex,
        )
    def test_multiple_inputs_exception(self):
        """Multiple inputs are rejected when only one 'expect' is given."""
        # Should not allow multiple inputs, since we specify
        # only one "expect" value
        with self.assertRaises(Exception):
            self.build_problem(math_display=True, expect="2*x+3*y", num_inputs=3)
    def _assert_symbolic_grade(
        self, problem, student_input, dynamath_input, expected_correctness,
        snuggletex_resp=""
    ):
        """
        Assert that the symbolic response has a certain grade.
        `problem` is the capa problem containing the symbolic response.
        `student_input` is the text the student entered.
        `dynamath_input` is the JavaScript rendered MathML from the page.
        `expected_correctness` is either "correct" or "incorrect"
        `snuggletex_resp` is the simulated response from the Snuggletex server
        """
        input_dict = {'1_2_1': str(student_input),
                      '1_2_1_dynamath': str(dynamath_input)}
        # Simulate what the Snuggletex server would respond
        with mock.patch.object(requests, 'post') as mock_post:
            mock_post.return_value.text = snuggletex_resp
            correct_map = problem.grade_answers(input_dict)
            self.assertEqual(
                correct_map.get_correctness('1_2_1'), expected_correctness
            )
class OptionResponseTest(ResponseTest):
    """Tests for option (dropdown) response grading."""
    xml_factory_class = OptionResponseXMLFactory
    def test_grade(self):
        """Only the designated option is graded as correct."""
        problem = self.build_problem(options=["first", "second", "third"],
                                     correct_option="second")
        # Assert that we get the expected grades
        self.assert_grade(problem, "first", "incorrect")
        self.assert_grade(problem, "second", "correct")
        self.assert_grade(problem, "third", "incorrect")
        # Options not in the list should be marked incorrect
        self.assert_grade(problem, "invalid_option", "incorrect")
    def test_quote_option(self):
        """Quotes inside option strings are escaped and matched properly."""
        # Test that option response properly escapes quotes inside options strings
        problem = self.build_problem(options=["hasnot", "hasn't", "has'nt"],
                                     correct_option="hasn't")
        # Assert that correct option with a quote inside is marked correctly
        self.assert_grade(problem, "hasnot", "incorrect")
        self.assert_grade(problem, "hasn't", "correct")
        self.assert_grade(problem, "hasn\'t", "correct")
        self.assert_grade(problem, "has'nt", "incorrect")
    def test_variable_options(self):
        """
        Test that if variable are given in option response then correct map must contain answervariable value.
        """
        script = textwrap.dedent("""\
            a = 1000
            b = a*2
            c = a*3
        """)
        problem = self.build_problem(
            options=['$a', '$b', '$c'],
            correct_option='$a',
            script=script
        )
        input_dict = {'1_2_1': '1000'}
        correct_map = problem.grade_answers(input_dict)
        self.assertEqual(correct_map.get_correctness('1_2_1'), 'correct')
        self.assertEqual(correct_map.get_property('1_2_1', 'answervariable'), '$a')
class FormulaResponseTest(ResponseTest):
    """
    Test the FormulaResponse class
    """
    xml_factory_class = FormulaResponseXMLFactory
    def test_grade(self):
        """
        Test basic functionality of FormulaResponse
        Specifically, if it can understand equivalence of formulae
        """
        # Sample variables x and y in the range [-10, 10]
        sample_dict = {'x': (-10, 10), 'y': (-10, 10)}
        # The expected solution is numerically equivalent to x+2y
        problem = self.build_problem(sample_dict=sample_dict,
                                     num_samples=10,
                                     tolerance=0.01,
                                     answer="x+2*y")
        # Expect an equivalent formula to be marked correct
        # 2x - x + y + y = x + 2y
        input_formula = "2*x - x + y + y"
        self.assert_grade(problem, input_formula, "correct")
        # Expect an incorrect formula to be marked incorrect
        # x + y != x + 2y
        input_formula = "x + y"
        self.assert_grade(problem, input_formula, "incorrect")
    def test_hint(self):
        """
        Test the hint-giving functionality of FormulaResponse
        """
        # Sample variables x and y in the range [-10, 10]
        sample_dict = {'x': (-10, 10), 'y': (-10, 10)}
        # Give a hint if the user leaves off the coefficient
        # or leaves out x
        hints = [('x + 3*y', 'y_coefficient', 'Check the coefficient of y'),
                 ('2*y', 'missing_x', 'Try including the variable x')]
        # The expected solution is numerically equivalent to x+2y
        problem = self.build_problem(sample_dict=sample_dict,
                                     num_samples=10,
                                     tolerance=0.01,
                                     answer="x+2*y",
                                     hints=hints)
        # Expect to receive a hint if we add an extra y
        input_dict = {'1_2_1': "x + 2*y + y"}
        correct_map = problem.grade_answers(input_dict)
        self.assertEquals(correct_map.get_hint('1_2_1'),
                          'Check the coefficient of y')
        # Expect to receive a hint if we leave out x
        input_dict = {'1_2_1': "2*y"}
        correct_map = problem.grade_answers(input_dict)
        self.assertEquals(correct_map.get_hint('1_2_1'),
                          'Try including the variable x')
    def test_script(self):
        """
        Test if python script can be used to generate answers
        """
        # Calculate the answer using a script
        script = "calculated_ans = 'x+x'"
        # Sample x in the range [-10,10]
        sample_dict = {'x': (-10, 10)}
        # The expected solution is numerically equivalent to 2*x
        problem = self.build_problem(sample_dict=sample_dict,
                                     num_samples=10,
                                     tolerance=0.01,
                                     answer="$calculated_ans",
                                     script=script)
        # Expect that the inputs are graded correctly
        self.assert_grade(problem, '2*x', 'correct')
        self.assert_grade(problem, '3*x', 'incorrect')
    def test_grade_infinity(self):
        """
        Test that a large input on a problem with relative tolerance isn't
        erroneously marked as correct.
        """
        sample_dict = {'x': (1, 2)}
        # Test problem
        problem = self.build_problem(sample_dict=sample_dict,
                                     num_samples=10,
                                     tolerance="1%",
                                     answer="x")
        # Expect such a large answer to be marked incorrect
        input_formula = "x*1e999"
        self.assert_grade(problem, input_formula, "incorrect")
        # Expect such a large negative answer to be marked incorrect
        input_formula = "-x*1e999"
        self.assert_grade(problem, input_formula, "incorrect")
    def test_grade_nan(self):
        """
        Test that expressions that evaluate to NaN are not marked as correct.
        """
        sample_dict = {'x': (1, 2)}
        # Test problem
        problem = self.build_problem(sample_dict=sample_dict,
                                     num_samples=10,
                                     tolerance="1%",
                                     answer="x")
        # Expect an incorrect answer (+ nan) to be marked incorrect
        # Right now this evaluates to 'nan' for a given x (Python implementation-dependent)
        input_formula = "10*x + 0*1e999"
        self.assert_grade(problem, input_formula, "incorrect")
        # Expect an correct answer (+ nan) to be marked incorrect
        input_formula = "x + 0*1e999"
        self.assert_grade(problem, input_formula, "incorrect")
    def test_raises_zero_division_err(self):
        """
        See if division by zero raises an error.
        """
        sample_dict = {'x': (1, 2)}
        problem = self.build_problem(sample_dict=sample_dict,
                                     num_samples=10,
                                     tolerance="1%",
                                     answer="x")  # Answer doesn't matter
        input_dict = {'1_2_1': '1/0'}
        self.assertRaises(StudentInputError, problem.grade_answers, input_dict)
    def test_validate_answer(self):
        """
        Makes sure that validate_answer works.
        """
        sample_dict = {'x': (1, 2)}
        problem = self.build_problem(
            sample_dict=sample_dict,
            num_samples=10,
            tolerance="1%",
            answer="x"
        )
        # responders.values()[0] is the single FormulaResponse in the problem
        self.assertTrue(problem.responders.values()[0].validate_answer('14*x'))
        self.assertFalse(problem.responders.values()[0].validate_answer('3*y+2*x'))
class StringResponseTest(ResponseTest):
xml_factory_class = StringResponseXMLFactory
def test_backward_compatibility_for_multiple_answers(self):
"""
Remove this test, once support for _or_ separator will be removed.
"""
answers = ["Second", "Third", "Fourth"]
problem = self.build_problem(answer="_or_".join(answers), case_sensitive=True)
for answer in answers:
# Exact string should be correct
self.assert_grade(problem, answer, "correct")
# Other strings and the lowercase version of the string are incorrect
self.assert_grade(problem, "Other String", "incorrect")
problem = self.build_problem(answer="_or_".join(answers), case_sensitive=False)
for answer in answers:
# Exact string should be correct
self.assert_grade(problem, answer, "correct")
self.assert_grade(problem, answer.lower(), "correct")
self.assert_grade(problem, "Other String", "incorrect")
def test_regexp(self):
problem = self.build_problem(answer="Second", case_sensitive=False, regexp=True)
self.assert_grade(problem, "Second", "correct")
problem = self.build_problem(answer="sec", case_sensitive=False, regexp=True)
self.assert_grade(problem, "Second", "incorrect")
problem = self.build_problem(answer="sec.*", case_sensitive=False, regexp=True)
self.assert_grade(problem, "Second", "correct")
problem = self.build_problem(answer="sec.*", case_sensitive=True, regexp=True)
self.assert_grade(problem, "Second", "incorrect")
problem = self.build_problem(answer="Sec.*$", case_sensitive=False, regexp=True)
self.assert_grade(problem, "Second", "correct")
problem = self.build_problem(answer="^sec$", case_sensitive=False, regexp=True)
self.assert_grade(problem, "Second", "incorrect")
problem = self.build_problem(answer="^Sec(ond)?$", case_sensitive=False, regexp=True)
self.assert_grade(problem, "Second", "correct")
problem = self.build_problem(answer="^Sec(ond)?$", case_sensitive=False, regexp=True)
self.assert_grade(problem, "Sec", "correct")
problem = self.build_problem(answer="tre+", case_sensitive=False, regexp=True)
self.assert_grade(problem, "There is a tree", "incorrect")
problem = self.build_problem(answer=".*tre+", case_sensitive=False, regexp=True)
self.assert_grade(problem, "There is a tree", "correct")
# test with case_sensitive not specified
problem = self.build_problem(answer=".*tre+", regexp=True)
self.assert_grade(problem, "There is a tree", "correct")
answers = [
"Martin Luther King Junior",
"Doctor Martin Luther King Junior",
"Dr. Martin Luther King Jr.",
"Martin Luther King"
]
problem = self.build_problem(answer="\w*\.?.*Luther King\s*.*", case_sensitive=True, regexp=True)
for answer in answers:
self.assert_grade(problem, answer, "correct")
problem = self.build_problem(answer="^(-\|){2,5}$", case_sensitive=False, regexp=True)
self.assert_grade(problem, "-|-|-|", "correct")
self.assert_grade(problem, "-|", "incorrect")
self.assert_grade(problem, "-|-|-|-|-|-|", "incorrect")
regexps = [
"^One$",
"two",
"^thre+",
"^4|Four$",
]
problem = self.build_problem(
answer="just_sample",
case_sensitive=False,
regexp=True,
additional_answers=regexps
)
self.assert_grade(problem, "One", "correct")
self.assert_grade(problem, "two", "correct")
self.assert_grade(problem, "!!two!!", "correct")
self.assert_grade(problem, "threeeee", "correct")
self.assert_grade(problem, "three", "correct")
self.assert_grade(problem, "4", "correct")
self.assert_grade(problem, "Four", "correct")
self.assert_grade(problem, "Five", "incorrect")
self.assert_grade(problem, "|", "incorrect")
# test unicode
problem = self.build_problem(answer=u"æ", case_sensitive=False, regexp=True, additional_answers=[u'ö'])
self.assert_grade(problem, u"æ", "correct")
self.assert_grade(problem, u"ö", "correct")
self.assert_grade(problem, u"î", "incorrect")
self.assert_grade(problem, u"o", "incorrect")
def test_backslash_and_unicode_regexps(self):
"""
Test some special cases of [unicode] regexps.
One needs to use either r'' strings or write real `repr` of unicode strings, because of the following
(from python docs, http://docs.python.org/2/library/re.html):
'for example, to match a literal backslash, one might have to write '\\\\' as the pattern string,
because the regular expression must be \\,
and each backslash must be expressed as \\ inside a regular Python string literal.'
Example of real use case in Studio:
a) user inputs regexp in usual regexp language,
b) regexp is saved to xml and is read in python as repr of that string
So a\d in front-end editor will become a\\\\d in xml, so it will match a1 as student answer.
"""
problem = self.build_problem(answer=ur"5\\æ", case_sensitive=False, regexp=True)
self.assert_grade(problem, u"5\æ", "correct")
problem = self.build_problem(answer=u"5\\\\æ", case_sensitive=False, regexp=True)
self.assert_grade(problem, u"5\æ", "correct")
def test_backslash(self):
problem = self.build_problem(answer=u"a\\\\c1", case_sensitive=False, regexp=True)
self.assert_grade(problem, u"a\c1", "correct")
def test_special_chars(self):
problem = self.build_problem(answer=ur"a \s1", case_sensitive=False, regexp=True)
self.assert_grade(problem, u"a 1", "correct")
def test_case_sensitive(self):
    """Grading is case sensitive both explicitly and by default."""
    # Single answer: an explicit case_sensitive=True and the unspecified
    # default must grade identically.
    single_answer_problems = [
        self.build_problem(answer="Second", case_sensitive=True),
        self.build_problem(answer="Second"),
    ]
    for problem in single_answer_problems:
        # Only the exact string is accepted; case differences are wrong.
        self.assert_grade(problem, "Second", "correct")
        self.assert_grade(problem, "Other String", "incorrect")
        self.assert_grade(problem, "second", "incorrect")

    # Multiple answers: same expectation for every additional answer.
    answers = ["Second", "Third", "Fourth"]
    multi_answer_problems = [
        self.build_problem(answer="sample_answer", case_sensitive=True, additional_answers=answers),
        self.build_problem(answer="sample_answer", additional_answers=answers),
    ]
    for problem in multi_answer_problems:
        for answer in answers:
            self.assert_grade(problem, answer, "correct")
        self.assert_grade(problem, "Other String", "incorrect")
        self.assert_grade(problem, "second", "incorrect")
def test_bogus_escape_not_raised(self):
    """
    Since ^ and $ are wrapped around the regexp, a trailing lone backslash
    no longer raises a "bogus escape" error -- it simply fails to match.
    """
    lone_backslash = self.build_problem(answer=u"\\", case_sensitive=False, regexp=True)
    self.assert_grade(lone_backslash, u"\\", "incorrect")

    # Escaping the backslash is the right way to match one literally.
    escaped_backslash = self.build_problem(answer=u"\\\\", case_sensitive=False, regexp=True)
    self.assert_grade(escaped_backslash, u"\\", "correct")
def test_case_insensitive(self):
    """With case_sensitive=False any capitalization of an answer is accepted."""
    # Single answer: both capitalizations pass, other strings fail.
    problem = self.build_problem(answer="Second", case_sensitive=False)
    self.assert_grade(problem, "Second", "correct")
    self.assert_grade(problem, "second", "correct")
    self.assert_grade(problem, "Other String", "incorrect")

    # Multiple answers: each answer and its lowercase form is accepted.
    answers = ["Second", "Third", "Fourth"]
    problem = self.build_problem(answer="sample_answer", case_sensitive=False, additional_answers=answers)
    for answer in answers:
        self.assert_grade(problem, answer, "correct")
        self.assert_grade(problem, answer.lower(), "correct")
    self.assert_grade(problem, "Other String", "incorrect")
def test_compatible_non_attribute_additional_answer_xml(self):
    """Additional answers given as child elements (legacy XML) still grade."""
    problem = self.build_problem(answer="Donut", non_attribute_answers=["Sprinkles"])
    expectations = [
        ("Donut", "correct"),
        ("Sprinkles", "correct"),
        ("Meh", "incorrect"),
    ]
    for student_answer, verdict in expectations:
        self.assert_grade(problem, student_answer, verdict)
def test_partial_matching(self):
    """An additional regexp answer matches regardless of digit position."""
    problem = self.build_problem(answer="a2", case_sensitive=False, regexp=True, additional_answers=['.?\\d.?'])
    for student_answer in ("a3", "3a"):
        self.assert_grade(problem, student_answer, "correct")
def test_exception(self):
    """An invalid additional regexp ('?' with nothing to repeat) surfaces as an exception."""
    problem = self.build_problem(answer="a2", case_sensitive=False, regexp=True, additional_answers=['?\\d?'])
    with self.assertRaises(Exception) as context:
        self.assert_grade(problem, "a3", "correct")
    # The regex compilation error message is propagated to the caller.
    self.assertIn("nothing to repeat", context.exception.message)
def test_hints(self):
    """Hinted wrong answers produce their hint; other answers produce none."""
    problem = self.build_problem(
        answer="Michigan",
        case_sensitive=False,
        hints=[
            ("wisconsin", "wisc", "The state capital of Wisconsin is Madison"),
            ("minnesota", "minn", "The state capital of Minnesota is St. Paul"),
        ],
    )
    expectations = [
        # Hinted wrong answers get their configured hint.
        ('Wisconsin', "The state capital of Wisconsin is Madison"),
        ('Minnesota', "The state capital of Minnesota is St. Paul"),
        # The correct answer gets no hint.
        ('Michigan', ""),
        # Arbitrary other strings get no hint either.
        ('California', ""),
    ]
    for student_answer, expected_hint in expectations:
        correct_map = problem.grade_answers({'1_2_1': student_answer})
        self.assertEquals(correct_map.get_hint('1_2_1'), expected_hint)
def test_hints_regexp_and_answer_regexp(self):
    """
    With regexp=True, both hint triggers and additional answers are
    interpreted as regular expressions.
    """
    different_student_answers = [
        "May be it is Boston",
        "Boston, really?",
        "Boston",
        "OK, I see, this is Boston",
    ]

    # if problem has regexp = true, it will accept hints written in regexp
    hints = [
        ("wisconsin", "wisc", "The state capital of Wisconsin is Madison"),
        ("minnesota", "minn", "The state capital of Minnesota is St. Paul"),
        (".*Boston.*", "bst", "First letter of correct answer is M."),
        ('^\\d9$', "numbers", "Should not end with 9."),
    ]

    # Any two-digit number not ending in 9 is also accepted as correct.
    additional_answers = [
        '^\\d[0-8]$',
    ]
    problem = self.build_problem(
        answer="Michigan",
        case_sensitive=False,
        hints=hints,
        additional_answers=additional_answers,
        regexp=True
    )

    # We should get a hint for Wisconsin
    input_dict = {'1_2_1': 'Wisconsin'}
    correct_map = problem.grade_answers(input_dict)
    self.assertEquals(correct_map.get_hint('1_2_1'),
                      "The state capital of Wisconsin is Madison")

    # We should get a hint for Minnesota
    input_dict = {'1_2_1': 'Minnesota'}
    correct_map = problem.grade_answers(input_dict)
    self.assertEquals(correct_map.get_hint('1_2_1'),
                      "The state capital of Minnesota is St. Paul")

    # We should NOT get a hint for Michigan (the correct answer)
    input_dict = {'1_2_1': 'Michigan'}
    correct_map = problem.grade_answers(input_dict)
    self.assertEquals(correct_map.get_hint('1_2_1'), "")

    # We should NOT get a hint for any other string
    input_dict = {'1_2_1': 'California'}
    correct_map = problem.grade_answers(input_dict)
    self.assertEquals(correct_map.get_hint('1_2_1'), "")

    # We should get the same hint for each answer matching the
    # '.*Boston.*' hint regexp.
    for answer in different_student_answers:
        input_dict = {'1_2_1': answer}
        correct_map = problem.grade_answers(input_dict)
        self.assertEquals(correct_map.get_hint('1_2_1'), "First letter of correct answer is M.")

    # '59' matches the '^\d9$' hint regexp, so it is hinted.
    input_dict = {'1_2_1': '59'}
    correct_map = problem.grade_answers(input_dict)
    self.assertEquals(correct_map.get_hint('1_2_1'), "Should not end with 9.")

    # '57' matches the additional answer regexp, so it is correct: no hint.
    input_dict = {'1_2_1': '57'}
    correct_map = problem.grade_answers(input_dict)
    self.assertEquals(correct_map.get_hint('1_2_1'), "")
def test_computed_hints(self):
    """A `hintfn` defined in the problem script computes the hint dynamically."""
    problem = self.build_problem(
        answer="Michigan",
        hintfn="gimme_a_hint",
        script=textwrap.dedent("""
            def gimme_a_hint(answer_ids, student_answers, new_cmap, old_cmap):
                aid = answer_ids[0]
                answer = student_answers[aid]
                new_cmap.set_hint_and_mode(aid, answer+"??", "always")
            """)
    )

    input_dict = {'1_2_1': 'Hello'}
    correct_map = problem.grade_answers(input_dict)
    # The hint function appends "??" to whatever the student submitted.
    self.assertEquals(correct_map.get_hint('1_2_1'), "Hello??")
def test_hint_function_randomization(self):
    # The hint function should get the seed from the problem, so that a
    # random number drawn inside the hint script is reproducible from
    # problem.seed.
    problem = self.build_problem(
        answer="1",
        hintfn="gimme_a_random_hint",
        script=textwrap.dedent("""
            def gimme_a_random_hint(answer_ids, student_answers, new_cmap, old_cmap):
                answer = {code}
                new_cmap.set_hint_and_mode(answer_ids[0], answer, "always")
            """.format(code=self._get_random_number_code()))
    )
    correct_map = problem.grade_answers({'1_2_1': '2'})
    hint = correct_map.get_hint('1_2_1')
    self.assertEqual(hint, self._get_random_number_result(problem.seed))
class CodeResponseTest(ResponseTest):
    """
    Tests of CodeResponse, which is graded asynchronously: answers are
    queued to an external grader and results delivered back later via
    LoncapaProblem.update_score.
    """
    xml_factory_class = CodeResponseXMLFactory

    def setUp(self):
        super(CodeResponseTest, self).setUp()

        grader_payload = json.dumps({"grader": "ps04/grade_square.py"})
        # num_responses=2 gives multiple answer_ids so queue state can be
        # tracked per-subproblem.
        self.problem = self.build_problem(initial_display="def square(x):",
                                          answer_display="answer",
                                          grader_payload=grader_payload,
                                          num_responses=2)

    @staticmethod
    def make_queuestate(key, time):
        """Create queuestate dict"""
        # Timestamps are serialized with the module-level `dateformat`,
        # which only has second resolution.
        timestr = datetime.strftime(time, dateformat)
        return {'key': key, 'time': timestr}

    def test_is_queued(self):
        """
        Simple test of whether LoncapaProblem knows when it's been queued
        """

        answer_ids = sorted(self.problem.get_question_answers())

        # CodeResponse requires internal CorrectMap state. Build it now in the unqueued state
        cmap = CorrectMap()
        for answer_id in answer_ids:
            cmap.update(CorrectMap(answer_id=answer_id, queuestate=None))
        self.problem.correct_map.update(cmap)

        self.assertEquals(self.problem.is_queued(), False)

        # Now we queue the LCP
        cmap = CorrectMap()
        for i, answer_id in enumerate(answer_ids):
            queuestate = CodeResponseTest.make_queuestate(i, datetime.now(UTC))
            cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate))
        self.problem.correct_map.update(cmap)

        self.assertEquals(self.problem.is_queued(), True)

    def test_update_score(self):
        '''
        Test whether LoncapaProblem.update_score can deliver queued result to the right subproblem
        '''
        answer_ids = sorted(self.problem.get_question_answers())

        # CodeResponse requires internal CorrectMap state. Build it now in the queued state
        old_cmap = CorrectMap()
        for i, answer_id in enumerate(answer_ids):
            queuekey = 1000 + i
            queuestate = CodeResponseTest.make_queuestate(queuekey, datetime.now(UTC))
            old_cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate))

        # Message format common to external graders
        grader_msg = '<span>MESSAGE</span>'  # Must be valid XML
        correct_score_msg = json.dumps({'correct': True, 'score': 1, 'msg': grader_msg})
        incorrect_score_msg = json.dumps({'correct': False, 'score': 0, 'msg': grader_msg})

        xserver_msgs = {'correct': correct_score_msg,
                        'incorrect': incorrect_score_msg, }

        # Incorrect queuekey, state should not be updated
        for correctness in ['correct', 'incorrect']:
            self.problem.correct_map = CorrectMap()
            self.problem.correct_map.update(old_cmap)  # Deep copy

            self.problem.update_score(xserver_msgs[correctness], queuekey=0)
            self.assertEquals(self.problem.correct_map.get_dict(), old_cmap.get_dict())  # Deep comparison

            for answer_id in answer_ids:
                self.assertTrue(self.problem.correct_map.is_queued(answer_id))  # Should be still queued, since message undelivered

        # Correct queuekey, state should be updated
        for correctness in ['correct', 'incorrect']:
            for i, answer_id in enumerate(answer_ids):
                self.problem.correct_map = CorrectMap()
                self.problem.correct_map.update(old_cmap)

                new_cmap = CorrectMap()
                new_cmap.update(old_cmap)
                npoints = 1 if correctness == 'correct' else 0
                new_cmap.set(answer_id=answer_id, npoints=npoints, correctness=correctness, msg=grader_msg, queuestate=None)

                self.problem.update_score(xserver_msgs[correctness], queuekey=1000 + i)
                self.assertEquals(self.problem.correct_map.get_dict(), new_cmap.get_dict())

                for j, test_id in enumerate(answer_ids):
                    if j == i:
                        self.assertFalse(self.problem.correct_map.is_queued(test_id))  # Should be dequeued, message delivered
                    else:
                        self.assertTrue(self.problem.correct_map.is_queued(test_id))  # Should be queued, message undelivered

    def test_recentmost_queuetime(self):
        '''
        Test whether the LoncapaProblem knows about the time of queue requests
        '''
        answer_ids = sorted(self.problem.get_question_answers())

        # CodeResponse requires internal CorrectMap state. Build it now in the unqueued state
        cmap = CorrectMap()
        for answer_id in answer_ids:
            cmap.update(CorrectMap(answer_id=answer_id, queuestate=None))
        self.problem.correct_map.update(cmap)

        # Nothing queued yet, so there is no recentmost queue time.
        self.assertEquals(self.problem.get_recentmost_queuetime(), None)

        # CodeResponse requires internal CorrectMap state. Build it now in the queued state
        cmap = CorrectMap()
        for i, answer_id in enumerate(answer_ids):
            queuekey = 1000 + i
            latest_timestamp = datetime.now(UTC)
            queuestate = CodeResponseTest.make_queuestate(queuekey, latest_timestamp)
            cmap.update(CorrectMap(answer_id=answer_id, queuestate=queuestate))
        self.problem.correct_map.update(cmap)

        # Queue state only tracks up to second, so round-trip the expected
        # timestamp through the same string format before comparing.
        latest_timestamp = datetime.strptime(
            datetime.strftime(latest_timestamp, dateformat), dateformat
        ).replace(tzinfo=UTC)

        self.assertEquals(self.problem.get_recentmost_queuetime(), latest_timestamp)

    def test_convert_files_to_filenames(self):
        '''
        Test whether file objects are converted to filenames without altering other structures
        '''
        problem_file = os.path.join(os.path.dirname(__file__), "test_files/filename_convert_test.txt")
        with open(problem_file) as fp:
            answers_with_file = {'1_2_1': 'String-based answer',
                                 '1_3_1': ['answer1', 'answer2', 'answer3'],
                                 '1_4_1': [fp, fp]}
            answers_converted = convert_files_to_filenames(answers_with_file)
            # Only the file objects are replaced (by their .name); strings
            # and lists of strings pass through untouched.
            self.assertEquals(answers_converted['1_2_1'], 'String-based answer')
            self.assertEquals(answers_converted['1_3_1'], ['answer1', 'answer2', 'answer3'])
            self.assertEquals(answers_converted['1_4_1'], [fp.name, fp.name])

    def test_parse_score_msg_of_responder(self):
        """
        Test whether LoncapaProblem._parse_score_msg correcly parses valid HTML5 html.
        """
        valid_grader_msgs = [
            u'<span>MESSAGE</span>',  # Valid XML
            textwrap.dedent("""
                <div class='matlabResponse'><div id='mwAudioPlaceHolder'>
                <audio controls autobuffer autoplay src='data:audio/wav;base64='>Audio is not supported on this browser.</audio>
                <div>Right click <a href=https://endpoint.mss-mathworks.com/media/filename.wav>here</a> and click \"Save As\" to download the file</div></div>
                <div style='white-space:pre' class='commandWindowOutput'></div><ul></ul></div>
            """).replace('\n', ''),  # Valid HTML5 real case Matlab response, invalid XML
            '<aaa></bbb>'  # Invalid XML, but will be parsed by html5lib to <aaa/>
        ]

        invalid_grader_msgs = [
            '<audio',  # invalid XML and HTML5
            '<p>\b</p>',  # invalid special character
        ]

        answer_ids = sorted(self.problem.get_question_answers())

        # CodeResponse requires internal CorrectMap state. Build it now in the queued state
        old_cmap = CorrectMap()
        for i, answer_id in enumerate(answer_ids):
            queuekey = 1000 + i
            queuestate = CodeResponseTest.make_queuestate(queuekey, datetime.now(UTC))
            old_cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate))

        # Valid messages are delivered verbatim.
        for grader_msg in valid_grader_msgs:
            correct_score_msg = json.dumps({'correct': True, 'score': 1, 'msg': grader_msg})
            incorrect_score_msg = json.dumps({'correct': False, 'score': 0, 'msg': grader_msg})
            xserver_msgs = {'correct': correct_score_msg, 'incorrect': incorrect_score_msg, }

            for i, answer_id in enumerate(answer_ids):
                self.problem.correct_map = CorrectMap()
                self.problem.correct_map.update(old_cmap)
                output = self.problem.update_score(xserver_msgs['correct'], queuekey=1000 + i)
                self.assertEquals(output[answer_id]['msg'], grader_msg)

        # Invalid messages are replaced by a generic error message.
        for grader_msg in invalid_grader_msgs:
            correct_score_msg = json.dumps({'correct': True, 'score': 1, 'msg': grader_msg})
            incorrect_score_msg = json.dumps({'correct': False, 'score': 0, 'msg': grader_msg})
            xserver_msgs = {'correct': correct_score_msg, 'incorrect': incorrect_score_msg, }

            for i, answer_id in enumerate(answer_ids):
                self.problem.correct_map = CorrectMap()
                self.problem.correct_map.update(old_cmap)
                output = self.problem.update_score(xserver_msgs['correct'], queuekey=1000 + i)
                self.assertEquals(output[answer_id]['msg'], u'Invalid grader reply. Please contact the course staff.')
class ChoiceResponseTest(ResponseTest):
    """Grading tests for radio and checkbox choice groups."""
    xml_factory_class = ChoiceResponseXMLFactory

    def test_radio_group_grade(self):
        """A radio group is correct only on the single flagged choice."""
        problem = self.build_problem(choice_type='radio',
                                     choices=[False, True, False])
        expectations = [
            ('choice_0', 'incorrect'),
            ('choice_1', 'correct'),
            ('choice_2', 'incorrect'),
            # A choice that does not exist is simply marked incorrect.
            ('choice_3', 'incorrect'),
        ]
        for choice, verdict in expectations:
            self.assert_grade(problem, choice, verdict)

    def test_checkbox_group_grade(self):
        """A checkbox group is correct iff exactly the correct boxes are ticked."""
        problem = self.build_problem(choice_type='checkbox',
                                     choices=[False, True, True])
        # Both correct choices, and nothing else.
        self.assert_grade(problem, ['choice_1', 'choice_2'], 'correct')
        wrong_selections = [
            'choice_1',                    # missing one correct choice
            'choice_2',                    # missing the other
            ['choice_0', 'choice_1'],      # includes an incorrect choice
            ['choice_0', 'choice_2'],      # includes an incorrect choice
            'choice_3',                    # nonexistent choice
        ]
        for selection in wrong_selections:
            self.assert_grade(problem, selection, 'incorrect')

    def test_grade_with_no_checkbox_selected(self):
        """
        Test that answer marked as incorrect if no checkbox selected.
        """
        problem = self.build_problem(
            choice_type='checkbox', choices=[False, False, False]
        )
        correct_map = problem.grade_answers({})
        self.assertEqual(correct_map.get_correctness('1_2_1'), 'incorrect')
class JavascriptResponseTest(ResponseTest):
    """Tests for the javascript response type, which needs unsafe-code permission."""
    xml_factory_class = JavascriptResponseXMLFactory

    def _build_js_problem(self, capa_system):
        """Build the standard javascript test problem against `capa_system`."""
        return self.build_problem(
            capa_system=capa_system,
            generator_src="test_problem_generator.js",
            grader_src="test_problem_grader.js",
            display_class="TestProblemDisplay",
            display_src="test_problem_display.js",
            param_dict={'value': '4'},
        )

    def test_grade(self):
        # The javascript sources are produced by compiling the coffee
        # files on the fly.
        coffee_file_path = os.path.dirname(__file__) + "/test_files/js/*.coffee"
        os.system("node_modules/.bin/coffee -c %s" % (coffee_file_path))

        capa_system = test_capa_system()
        capa_system.can_execute_unsafe_code = lambda: True
        problem = self._build_js_problem(capa_system)

        # The grader accepts 4 and rejects anything else.
        self.assert_grade(problem, json.dumps({0: 4}), "correct")
        self.assert_grade(problem, json.dumps({0: 5}), "incorrect")

    def test_cant_execute_javascript(self):
        # If the system says to disallow unsafe code execution, then making
        # this problem will raise an exception.
        capa_system = test_capa_system()
        capa_system.can_execute_unsafe_code = lambda: False

        with self.assertRaises(LoncapaProblemError):
            self._build_js_problem(capa_system)
class NumericalResponseTest(ResponseTest):
    """Grading tests for numerical responses: exact, tolerance, range, and errors."""
    xml_factory_class = NumericalResponseXMLFactory

    # We blend the line between integration (using evaluator) and exclusively
    # unit testing the NumericalResponse (mocking out the evaluator)
    # For simple things its not worth the effort.
    def test_grade_range_tolerance(self):
        """Interval answers like '[5, 7)' accept any value in the interval."""
        problem_setup = [
            # [given_answer, [list of correct responses], [list of incorrect responses]]
            ['[5, 7)', ['5', '6', '6.999'], ['4.999', '7']],
            ['[1.6e-5, 1.9e24)', ['0.000016', '1.6*10^-5', '1.59e24'], ['1.59e-5', '1.9e24', '1.9*10^24']],
            ['[0, 1.6e-5]', ['1.6*10^-5'], ["2"]],
            ['(1.6e-5, 10]', ["2"], ['1.6*10^-5']],
        ]
        for given_answer, correct_responses, incorrect_responses in problem_setup:
            problem = self.build_problem(answer=given_answer)
            self.assert_multiple_grade(problem, correct_responses, incorrect_responses)

    def test_grade_range_tolerance_exceptions(self):
        """Malformed range answers or complex inputs raise StudentInputError."""
        # no complex number in range tolerance staff answer
        problem = self.build_problem(answer='[1j, 5]')
        input_dict = {'1_2_1': '3'}
        with self.assertRaises(StudentInputError):
            problem.grade_answers(input_dict)

        # no complex numbers in student answers to range tolerance problems
        problem = self.build_problem(answer='(1, 5)')
        input_dict = {'1_2_1': '1*J'}
        with self.assertRaises(StudentInputError):
            problem.grade_answers(input_dict)

        # test isnan student input: no exception,
        # but problem should be graded as incorrect
        problem = self.build_problem(answer='(1, 5)')
        input_dict = {'1_2_1': ''}
        correct_map = problem.grade_answers(input_dict)
        correctness = correct_map.get_correctness('1_2_1')
        self.assertEqual(correctness, 'incorrect')

        # test invalid range tolerance answer
        with self.assertRaises(StudentInputError):
            problem = self.build_problem(answer='(1 5)')

        # test empty boundaries
        problem = self.build_problem(answer='(1, ]')
        input_dict = {'1_2_1': '3'}
        with self.assertRaises(StudentInputError):
            problem.grade_answers(input_dict)

    def test_grade_exact(self):
        """Exact numeric answers accept any equivalent decimal spelling."""
        problem = self.build_problem(answer=4)
        correct_responses = ["4", "4.0", "4.00"]
        incorrect_responses = ["", "3.9", "4.1", "0"]
        self.assert_multiple_grade(problem, correct_responses, incorrect_responses)

    def test_grade_decimal_tolerance(self):
        """An absolute tolerance accepts answers within +/- tolerance."""
        problem = self.build_problem(answer=4, tolerance=0.1)
        correct_responses = ["4.0", "4.00", "4.09", "3.91"]
        incorrect_responses = ["", "4.11", "3.89", "0"]
        self.assert_multiple_grade(problem, correct_responses, incorrect_responses)

    def test_grade_percent_tolerance(self):
        """A percentage tolerance scales with the staff answer's magnitude."""
        # Positive only range
        problem = self.build_problem(answer=4, tolerance="10%")
        correct_responses = ["4.0", "4.00", "4.39", "3.61"]
        incorrect_responses = ["", "4.41", "3.59", "0"]
        self.assert_multiple_grade(problem, correct_responses, incorrect_responses)
        # Negative only range
        problem = self.build_problem(answer=-4, tolerance="10%")
        correct_responses = ["-4.0", "-4.00", "-4.39", "-3.61"]
        incorrect_responses = ["", "-4.41", "-3.59", "0"]
        self.assert_multiple_grade(problem, correct_responses, incorrect_responses)
        # Mixed negative/positive range
        problem = self.build_problem(answer=1, tolerance="200%")
        correct_responses = ["1", "1.00", "2.99", "0.99"]
        incorrect_responses = ["", "3.01", "-1.01"]
        self.assert_multiple_grade(problem, correct_responses, incorrect_responses)

    def test_floats(self):
        """
        Default tolerance for all responsetypes is 1e-3%.
        """
        problem_setup = [
            # [given_answer, [list of correct responses], [list of incorrect responses]]
            [1, ["1"], ["1.1"]],
            [2.0, ["2.0"], ["1.0"]],
            [4, ["4.0", "4.00004"], ["4.00005"]],
            [0.00016, ["1.6*10^-4"], [""]],
            [0.000016, ["1.6*10^-5"], ["0.000165"]],
            [1.9e24, ["1.9*10^24"], ["1.9001*10^24"]],
            [2e-15, ["2*10^-15"], [""]],
            [3141592653589793238., ["3141592653589793115."], [""]],
            [0.1234567, ["0.123456", "0.1234561"], ["0.123451"]],
            [1e-5, ["1e-5", "1.0e-5"], ["-1e-5", "2*1e-5"]],
        ]
        for given_answer, correct_responses, incorrect_responses in problem_setup:
            problem = self.build_problem(answer=given_answer)
            self.assert_multiple_grade(problem, correct_responses, incorrect_responses)

    def test_grade_with_script(self):
        """A staff answer may reference a value computed in the problem script."""
        script_text = "computed_response = math.sqrt(4)"
        problem = self.build_problem(answer="$computed_response", script=script_text)
        correct_responses = ["2", "2.0"]
        incorrect_responses = ["", "2.01", "1.99", "0"]
        self.assert_multiple_grade(problem, correct_responses, incorrect_responses)

    def test_raises_zero_division_err(self):
        """See if division by zero is handled correctly."""
        problem = self.build_problem(answer="1")  # Answer doesn't matter
        input_dict = {'1_2_1': '1/0'}
        with self.assertRaises(StudentInputError):
            problem.grade_answers(input_dict)

    def test_staff_inputs_expressions(self):
        """Test that staff may enter in an expression as the answer."""
        problem = self.build_problem(answer="1/3", tolerance=1e-3)
        correct_responses = ["1/3", "0.333333"]
        incorrect_responses = []
        self.assert_multiple_grade(problem, correct_responses, incorrect_responses)

    def test_staff_inputs_expressions_legacy(self):
        """Test that staff may enter in a complex number as the answer."""
        problem = self.build_problem(answer="1+1j", tolerance=1e-3)
        self.assert_grade(problem, '1+j', 'correct')

    @mock.patch('capa.responsetypes.log')
    def test_staff_inputs_bad_syntax(self, mock_log):
        """An unparseable staff answer raises StudentInputError and is logged."""
        staff_ans = "clearly bad syntax )[+1e"
        problem = self.build_problem(answer=staff_ans, tolerance=1e-3)

        msg = "There was a problem with the staff answer to this problem"
        with self.assertRaisesRegexp(StudentInputError, msg):
            self.assert_grade(problem, '1+j', 'correct')

        mock_log.debug.assert_called_once_with(
            "Content error--answer '%s' is not a valid number", staff_ans
        )

    @mock.patch('capa.responsetypes.log')
    def test_responsetype_i18n(self, mock_log):
        """Test that LoncapaSystem has an i18n that works."""
        staff_ans = "clearly bad syntax )[+1e"
        problem = self.build_problem(answer=staff_ans, tolerance=1e-3)

        class FakeTranslations(object):
            """A fake gettext.Translations object."""
            def ugettext(self, text):
                """Return the 'translation' of `text`."""
                if text == "There was a problem with the staff answer to this problem.":
                    text = "TRANSLATED!"
                return text
        problem.capa_system.i18n = FakeTranslations()

        with self.assertRaisesRegexp(StudentInputError, "TRANSLATED!"):
            self.assert_grade(problem, '1+j', 'correct')

    def test_grade_infinity(self):
        """
        Check that infinity doesn't automatically get marked correct.

        This resolves a bug where a problem with relative tolerance would
        pass with any arbitrarily large student answer.
        """
        mapping = {
            'some big input': float('inf'),
            'some neg input': -float('inf'),
            'weird NaN input': float('nan'),
            '4': 4
        }

        def evaluator_side_effect(_, __, math_string):
            """Look up the given response for `math_string`."""
            return mapping[math_string]

        problem = self.build_problem(answer=4, tolerance='10%')

        with mock.patch('capa.responsetypes.evaluator') as mock_eval:
            mock_eval.side_effect = evaluator_side_effect
            self.assert_grade(problem, 'some big input', 'incorrect')
            self.assert_grade(problem, 'some neg input', 'incorrect')
            self.assert_grade(problem, 'weird NaN input', 'incorrect')

    def test_err_handling(self):
        """
        See that `StudentInputError`s are raised when things go wrong.
        """
        problem = self.build_problem(answer=4)

        errors = [  # (exception raised, message to student)
            (calc.UndefinedVariable("x"), r"You may not use variables \(x\) in numerical problems"),
            (ValueError("factorial() mess-up"), "factorial function evaluated outside its domain"),
            (ValueError(), "Could not interpret '.*' as a number"),
            (pyparsing.ParseException("oopsie"), "Invalid math syntax"),
            (ZeroDivisionError(), "Could not interpret '.*' as a number")
        ]
        with mock.patch('capa.responsetypes.evaluator') as mock_eval:
            for err, msg_regex in errors:
                # Note: `err` is a closure over the loop variable; safe here
                # because the side effect is exercised before the next iteration.
                def evaluator_side_effect(_, __, math_string):
                    """Raise an error only for the student input."""
                    if math_string != '4':
                        raise err
                mock_eval.side_effect = evaluator_side_effect

                with self.assertRaisesRegexp(StudentInputError, msg_regex):
                    problem.grade_answers({'1_2_1': 'foobar'})

    def test_compare_answer(self):
        """Tests the answer compare function."""
        problem = self.build_problem(answer="42")
        # Python 2: dict.values() returns a list, so [0] gives the sole responder.
        responder = problem.responders.values()[0]
        self.assertTrue(responder.compare_answer('48', '8*6'))
        self.assertFalse(responder.compare_answer('48', '9*5'))

    def test_validate_answer(self):
        """Tests the answer validation function."""
        problem = self.build_problem(answer="42")
        responder = problem.responders.values()[0]
        self.assertTrue(responder.validate_answer('23.5'))
        self.assertFalse(responder.validate_answer('fish'))
class CustomResponseTest(ResponseTest):
xml_factory_class = CustomResponseXMLFactory
def test_inline_code(self):
    """
    Inline check code mutates globals directly: it reads `answers` (the
    submitted values) and `expect` (from the XML), and writes its verdicts
    into the `correct` list.
    """
    inline_script = """correct[0] = 'correct' if (answers['1_2_1'] == expect) else 'incorrect'"""
    problem = self.build_problem(answer=inline_script, expect="42")

    for submission, verdict in (('42', 'correct'), ('0', 'incorrect')):
        self.assert_grade(problem, submission, verdict)
def test_inline_message(self):
    # Inline code can update the global messages list
    # to pass messages to the CorrectMap for a particular input
    # The code can also set the global overall_message (str)
    # to pass a message that applies to the whole response
    inline_script = textwrap.dedent("""
        messages[0] = "Test Message"
        overall_message = "Overall message"
        """)
    problem = self.build_problem(answer=inline_script)

    input_dict = {'1_2_1': '0'}
    correctmap = problem.grade_answers(input_dict)

    # Check that the message for the particular input was received
    input_msg = correctmap.get_msg('1_2_1')
    self.assertEqual(input_msg, "Test Message")

    # Check that the overall message (for the whole response) was received
    overall_msg = correctmap.get_overall_message()
    self.assertEqual(overall_msg, "Overall message")
def test_inline_randomization(self):
    """The problem's seed feeds random numbers drawn by inline check code."""
    inline_script = "messages[0] = {code}".format(code=self._get_random_number_code())
    problem = self.build_problem(answer=inline_script)

    graded = problem.grade_answers({'1_2_1': '0'})
    # The message produced by the script must equal the value the same
    # seed produces outside the sandbox.
    self.assertEqual(graded.get_msg('1_2_1'), self._get_random_number_result(problem.seed))
def test_function_code_single_input(self):
    """
    A check function receives `expect` (the <customresponse> attribute)
    and `answer_given` (the single student answer, or an ordered list
    when there are multiple inputs).  Returning {'ok': BOOL, 'msg': STRING}
    without 'grade_decimal' exercises that the latter is optional.
    """
    script = textwrap.dedent("""
        def check_func(expect, answer_given):
            return {'ok': answer_given == expect, 'msg': 'Message text'}
        """)
    problem = self.build_problem(script=script, cfn="check_func", expect="42")

    graded_cases = [
        ('42', 'correct', 1),    # matching answer earns full credit
        ('0', 'incorrect', 0),   # non-matching answer earns none
    ]
    for submission, expected_correctness, expected_npoints in graded_cases:
        correct_map = problem.grade_answers({'1_2_1': submission})
        self.assertEqual(correct_map.get_correctness('1_2_1'), expected_correctness)
        self.assertEqual(correct_map.get_msg('1_2_1'), "Message text")
        self.assertEqual(correct_map.get_npoints('1_2_1'), expected_npoints)
def test_function_code_single_input_decimal_score(self):
    # For function code, we pass in these arguments:
    #
    #   'expect' is the expect attribute of the <customresponse>
    #
    #   'answer_given' is the answer the student gave (if there is just one input)
    #       or an ordered list of answers (if there are multiple inputs)
    #
    # The function should return a dict of the form
    #   { 'ok': BOOL, 'msg': STRING, 'grade_decimal': FLOAT }
    # Here 'grade_decimal' awards partial credit independent of 'ok'.
    script = textwrap.dedent("""
        def check_func(expect, answer_given):
            return {
                'ok': answer_given == expect,
                'msg': 'Message text',
                'grade_decimal': 0.9 if answer_given == expect else 0.1,
            }
        """)
    problem = self.build_problem(script=script, cfn="check_func", expect="42")

    # Correct answer: npoints follows grade_decimal, not just 'ok'.
    input_dict = {'1_2_1': '42'}
    correct_map = problem.grade_answers(input_dict)

    self.assertEqual(correct_map.get_npoints('1_2_1'), 0.9)
    self.assertEqual(correct_map.get_correctness('1_2_1'), 'correct')

    # Incorrect answer still earns the 0.1 partial credit.
    input_dict = {'1_2_1': '43'}
    correct_map = problem.grade_answers(input_dict)

    self.assertEqual(correct_map.get_npoints('1_2_1'), 0.1)
    self.assertEqual(correct_map.get_correctness('1_2_1'), 'incorrect')
def test_function_code_multiple_input_no_msg(self):
    # Check functions also have the option of returning
    # a single boolean value
    # If true, mark all the inputs correct
    # If false, mark all the inputs incorrect
    script = textwrap.dedent("""
        def check_func(expect, answer_given):
            return (answer_given[0] == expect and
                    answer_given[1] == expect)
        """)
    problem = self.build_problem(script=script, cfn="check_func",
                                 expect="42", num_inputs=2)

    # Correct answer -- expect both inputs marked correct
    input_dict = {'1_2_1': '42', '1_2_2': '42'}
    correct_map = problem.grade_answers(input_dict)

    correctness = correct_map.get_correctness('1_2_1')
    self.assertEqual(correctness, 'correct')

    correctness = correct_map.get_correctness('1_2_2')
    self.assertEqual(correctness, 'correct')

    # One answer incorrect -- expect both inputs marked incorrect,
    # since the single boolean applies to the whole response.
    input_dict = {'1_2_1': '0', '1_2_2': '42'}
    correct_map = problem.grade_answers(input_dict)

    correctness = correct_map.get_correctness('1_2_1')
    self.assertEqual(correctness, 'incorrect')

    correctness = correct_map.get_correctness('1_2_2')
    self.assertEqual(correctness, 'incorrect')
def test_function_code_multiple_inputs(self):
    # If the <customresponse> has multiple inputs associated with it,
    # the check function can return a dict of the form:
    #
    #   {'overall_message': STRING,
    #    'input_list': [{'ok': BOOL, 'msg': STRING}, ...] }  (no grade_decimal to test it's optional)
    #
    #   'overall_message' is displayed at the end of the response
    #
    #   'input_list' contains dictionaries representing the correctness
    #       and message for each input.
    script = textwrap.dedent("""
        def check_func(expect, answer_given):
            check1 = (int(answer_given[0]) == 1)
            check2 = (int(answer_given[1]) == 2)
            check3 = (int(answer_given[2]) == 3)
            return {'overall_message': 'Overall message',
                    'input_list': [
                        {'ok': check1, 'msg': 'Feedback 1'},
                        {'ok': check2, 'msg': 'Feedback 2'},
                        {'ok': check3, 'msg': 'Feedback 3'} ] }
        """)
    problem = self.build_problem(script=script,
                                 cfn="check_func", num_inputs=3)

    # Grade the inputs (one input incorrect)
    input_dict = {'1_2_1': '-999', '1_2_2': '2', '1_2_3': '3'}
    correct_map = problem.grade_answers(input_dict)

    # Expect that we receive the overall message (for the whole response)
    self.assertEqual(correct_map.get_overall_message(), "Overall message")

    # Expect that the inputs were graded individually
    self.assertEqual(correct_map.get_correctness('1_2_1'), 'incorrect')
    self.assertEqual(correct_map.get_correctness('1_2_2'), 'correct')
    self.assertEqual(correct_map.get_correctness('1_2_3'), 'correct')

    # Expect that the inputs were given correct npoints
    # (1 per correct input when grade_decimal is omitted)
    self.assertEqual(correct_map.get_npoints('1_2_1'), 0)
    self.assertEqual(correct_map.get_npoints('1_2_2'), 1)
    self.assertEqual(correct_map.get_npoints('1_2_3'), 1)

    # Expect that we received messages for each individual input
    self.assertEqual(correct_map.get_msg('1_2_1'), 'Feedback 1')
    self.assertEqual(correct_map.get_msg('1_2_2'), 'Feedback 2')
    self.assertEqual(correct_map.get_msg('1_2_3'), 'Feedback 3')
def test_function_code_multiple_inputs_decimal_score(self):
# If the <customresponse> has multiple inputs associated with it,
# the check function can return a dict of the form:
#
# {'overall_message': STRING,
# 'input_list': [{'ok': BOOL, 'msg': STRING, 'grade_decimal': FLOAT}, ...] }
# #
# 'input_list' contains dictionaries representing the correctness
# and message for each input.
script = textwrap.dedent("""
def check_func(expect, answer_given):
check1 = (int(answer_given[0]) == 1)
check2 = (int(answer_given[1]) == 2)
check3 = (int(answer_given[2]) == 3)
score1 = 0.9 if check1 else 0.1
score2 = 0.9 if check2 else 0.1
score3 = 0.9 if check3 else 0.1
return {
'input_list': [
{'ok': check1, 'grade_decimal': score1, 'msg': 'Feedback 1'},
{'ok': check2, 'grade_decimal': score2, 'msg': 'Feedback 2'},
{'ok': check3, 'grade_decimal': score3, 'msg': 'Feedback 3'},
]
}
""")
problem = self.build_problem(script=script, cfn="check_func", num_inputs=3)
# Grade the inputs (one input incorrect)
input_dict = {'1_2_1': '-999', '1_2_2': '2', '1_2_3': '3'}
correct_map = problem.grade_answers(input_dict)
# Expect that the inputs were graded individually
self.assertEqual(correct_map.get_correctness('1_2_1'), 'incorrect')
self.assertEqual(correct_map.get_correctness('1_2_2'), 'correct')
self.assertEqual(correct_map.get_correctness('1_2_3'), 'correct')
# Expect that the inputs were given correct npoints
self.assertEqual(correct_map.get_npoints('1_2_1'), 0.1)
self.assertEqual(correct_map.get_npoints('1_2_2'), 0.9)
self.assertEqual(correct_map.get_npoints('1_2_3'), 0.9)
def test_function_code_with_extra_args(self):
    """`cfn_extra_args` forwards the named attributes (here `options` and
    `dynamath`) as extra positional arguments to the check function."""
    script = textwrap.dedent("""\
        def check_func(expect, answer_given, options, dynamath):
            assert options == "xyzzy", "Options was %r" % options
            return {'ok': answer_given == expect, 'msg': 'Message text'}
        """)
    problem = self.build_problem(script=script, cfn="check_func", expect="42",
                                 options="xyzzy",
                                 cfn_extra_args="options dynamath")

    # The message is returned for both right and wrong answers.
    for submission, expected in (('42', 'correct'), ('0', 'incorrect')):
        correct_map = problem.grade_answers({'1_2_1': submission})
        self.assertEqual(correct_map.get_correctness('1_2_1'), expected)
        self.assertEqual(correct_map.get_msg('1_2_1'), "Message text")
def test_multiple_inputs_return_one_status(self):
    # When given multiple inputs, the 'answer_given' argument
    # to the check_func() is a list of inputs
    #
    # The sample script below marks the problem as correct
    # if and only if it receives answer_given=[1,2,3]
    # (or string values ['1','2','3'])
    #
    # Since we return a dict describing the status of one input,
    # we expect that the same 'ok' value is applied to each
    # of the inputs.
    script = textwrap.dedent("""
        def check_func(expect, answer_given):
            check1 = (int(answer_given[0]) == 1)
            check2 = (int(answer_given[1]) == 2)
            check3 = (int(answer_given[2]) == 3)
            return {'ok': (check1 and check2 and check3),
                    'msg': 'Message text'}
        """)
    problem = self.build_problem(script=script,
                                 cfn="check_func", num_inputs=3)

    # Grade the inputs (one input incorrect)
    input_dict = {'1_2_1': '-999', '1_2_2': '2', '1_2_3': '3'}
    correct_map = problem.grade_answers(input_dict)

    # Everything marked incorrect
    self.assertEqual(correct_map.get_correctness('1_2_1'), 'incorrect')
    self.assertEqual(correct_map.get_correctness('1_2_2'), 'incorrect')
    self.assertEqual(correct_map.get_correctness('1_2_3'), 'incorrect')

    # Grade the inputs (everything correct)
    input_dict = {'1_2_1': '1', '1_2_2': '2', '1_2_3': '3'}
    correct_map = problem.grade_answers(input_dict)

    # Everything marked correct (fixed: this comment previously said
    # "incorrect", contradicting the assertions below)
    self.assertEqual(correct_map.get_correctness('1_2_1'), 'correct')
    self.assertEqual(correct_map.get_correctness('1_2_2'), 'correct')
    self.assertEqual(correct_map.get_correctness('1_2_3'), 'correct')

    # Message is interpreted as an "overall message"
    self.assertEqual(correct_map.get_overall_message(), 'Message text')
def test_script_exception_function(self):
    """An exception raised inside the check function surfaces as a
    ResponseError when the answer is graded."""
    script = textwrap.dedent("""
        def check_func(expect, answer_given):
            raise Exception("Test")
        """)
    problem = self.build_problem(script=script, cfn="check_func")
    with self.assertRaises(ResponseError):
        problem.grade_answers({'1_2_1': '42'})

def test_script_exception_inline(self):
    """An exception raised by an inline <answer> script surfaces as a
    ResponseError when the answer is graded."""
    problem = self.build_problem(answer='raise Exception("Test")')
    with self.assertRaises(ResponseError):
        problem.grade_answers({'1_2_1': '42'})

def test_invalid_dict_exception(self):
    """A check function returning a dict without the expected keys
    ('ok'/'input_list'/...) raises ResponseError at grading time."""
    script = textwrap.dedent("""
        def check_func(expect, answer_given):
            return {'invalid': 'test'}
        """)
    problem = self.build_problem(script=script, cfn="check_func")
    with self.assertRaises(ResponseError):
        problem.grade_answers({'1_2_1': '42'})
def test_setup_randomization(self):
    # Ensure that the problem setup script gets the random seed from the problem.
    # (_get_random_number_code / _get_random_number_result are presumably
    # helpers on the shared ResponseTest base class -- not visible here.)
    script = textwrap.dedent("""
        num = {code}
        """.format(code=self._get_random_number_code()))
    problem = self.build_problem(script=script)
    self.assertEqual(problem.context['num'], self._get_random_number_result(problem.seed))

def test_check_function_randomization(self):
    # The check function should get random-seeded from the problem.
    # The doubled braces {{ }} survive .format() as literal braces, so the
    # sandboxed script sees an ordinary dict literal.
    script = textwrap.dedent("""
        def check_func(expect, answer_given):
            return {{'ok': True, 'msg': {code} }}
        """.format(code=self._get_random_number_code()))
    problem = self.build_problem(script=script, cfn="check_func", expect="42")
    input_dict = {'1_2_1': '42'}
    correct_map = problem.grade_answers(input_dict)
    msg = correct_map.get_msg('1_2_1')
    self.assertEqual(msg, self._get_random_number_result(problem.seed))
def test_random_isnt_none(self):
    # Bug LMS-500 says random.seed(10) fails with:
    #     File "<string>", line 61, in <module>
    #     File "/usr/lib/python2.7/random.py", line 116, in seed
    #       super(Random, self).seed(a)
    #     TypeError: must be type, not None
    #
    # Compute the expected value with a private Random instance so the
    # module-level random state is left untouched.
    r = random.Random()
    r.seed(10)
    num = r.randint(0, 1e9)
    # The sandboxed setup script seeds the same way; if `random` were None
    # inside the sandbox (the LMS-500 symptom) this would blow up instead
    # of producing a matching value.
    script = textwrap.dedent("""
        random.seed(10)
        num = random.randint(0, 1e9)
        """)
    problem = self.build_problem(script=script)
    self.assertEqual(problem.context['num'], num)
def test_module_imports_inline(self):
    '''
    Check that the correct modules are available to custom
    response scripts executed inline (via the <answer> tag).
    '''
    for module_name in ['random', 'numpy', 'math', 'scipy',
                        'calc', 'eia', 'chemcalc', 'chemtools',
                        'miller', 'draganddrop']:
        # Create a script that checks that the name is defined.
        # If the name is not defined, the assert raises, and grading
        # fails with a ResponseError.
        script = textwrap.dedent('''
            correct[0] = 'correct'
            assert('%s' in globals())''' % module_name)

        # Create the problem
        problem = self.build_problem(answer=script)

        # Expect that we can grade an answer without getting an exception
        try:
            problem.grade_answers({'1_2_1': '42'})
        except ResponseError:
            # Fixed format string: was "'{0}s'", which rendered the module
            # name with a stray trailing "s" (e.g. "randoms").
            self.fail("Could not use name '{0}' in custom response".format(module_name))

def test_module_imports_function(self):
    '''
    Check that the correct modules are available to custom
    response check functions (via the cfn attribute).
    '''
    for module_name in ['random', 'numpy', 'math', 'scipy',
                        'calc', 'eia', 'chemcalc', 'chemtools',
                        'miller', 'draganddrop']:
        # Create a script that checks that the name is defined.
        # If the name is not defined, the assert raises, and grading
        # fails with a ResponseError.
        script = textwrap.dedent('''
            def check_func(expect, answer_given):
                assert('%s' in globals())
                return True''' % module_name)

        # Create the problem
        problem = self.build_problem(script=script, cfn="check_func")

        # Expect that we can grade an answer without getting an exception
        try:
            problem.grade_answers({'1_2_1': '42'})
        except ResponseError:
            # Fixed format string: was "'{0}s'" (see inline test above).
            self.fail("Could not use name '{0}' in custom response".format(module_name))
def test_python_lib_zip_is_available(self):
    """Code shipped in a python_lib zip handed to the capa system can be
    imported and used by problem scripts."""
    # Build an in-memory zipfile holding one module with one function.
    zip_buffer = StringIO()
    with zipfile.ZipFile(zip_buffer, "w") as zf:
        zf.writestr("my_helper.py", textwrap.dedent("""\
            def seventeen():
                return 17
            """))

    # Use the zipped module from a problem setup script.
    script = textwrap.dedent("""
        import my_helper
        num = my_helper.seventeen()
        """)
    capa_system = test_capa_system()
    capa_system.get_python_lib_zip = lambda: zip_buffer.getvalue()
    problem = self.build_problem(script=script, capa_system=capa_system)
    self.assertEqual(problem.context['num'], 17)
def test_function_code_multiple_inputs_order(self):
    # Ensure that order must be correct according to sub-problem position,
    # even when the submitted dict enumerates its keys out of order
    # (string keys '1_2_10' and '1_2_11' sort before '1_2_2' lexically).
    script = textwrap.dedent("""
        def check_func(expect, answer_given):
            check1 = (int(answer_given[0]) == 1)
            check2 = (int(answer_given[1]) == 2)
            check3 = (int(answer_given[2]) == 3)
            check4 = (int(answer_given[3]) == 4)
            check5 = (int(answer_given[4]) == 5)
            check6 = (int(answer_given[5]) == 6)
            check7 = (int(answer_given[6]) == 7)
            check8 = (int(answer_given[7]) == 8)
            check9 = (int(answer_given[8]) == 9)
            check10 = (int(answer_given[9]) == 10)
            check11 = (int(answer_given[10]) == 11)
            return {'overall_message': 'Overall message',
                    'input_list': [
                        { 'ok': check1, 'msg': '1'},
                        { 'ok': check2, 'msg': '2'},
                        { 'ok': check3, 'msg': '3'},
                        { 'ok': check4, 'msg': '4'},
                        { 'ok': check5, 'msg': '5'},
                        { 'ok': check6, 'msg': '6'},
                        { 'ok': check7, 'msg': '7'},
                        { 'ok': check8, 'msg': '8'},
                        { 'ok': check9, 'msg': '9'},
                        { 'ok': check10, 'msg': '10'},
                        { 'ok': check11, 'msg': '11'},
                    ]}
        """)
    problem = self.build_problem(script=script, cfn="check_func", num_inputs=11)
    # Grade the inputs showing out of order.
    # Note '1_2_11' is deliberately wrong (16 != 11) so we can verify it
    # is graded against position 11, not some shuffled position.
    input_dict = {
        '1_2_1': '1',
        '1_2_2': '2',
        '1_2_3': '3',
        '1_2_4': '4',
        '1_2_5': '5',
        '1_2_6': '6',
        '1_2_10': '10',
        '1_2_11': '16',
        '1_2_7': '7',
        '1_2_8': '8',
        '1_2_9': '9'
    }
    correct_order = [
        '1_2_1', '1_2_2', '1_2_3', '1_2_4', '1_2_5', '1_2_6', '1_2_7', '1_2_8', '1_2_9', '1_2_10', '1_2_11'
    ]
    correct_map = problem.grade_answers(input_dict)
    self.assertNotEqual(problem.student_answers.keys(), correct_order)
    # equal to correct order after sorting at get_score (typo "euqal" fixed)
    self.assertListEqual(problem.responders.values()[0].context['idset'], correct_order)
    self.assertEqual(correct_map.get_correctness('1_2_1'), 'correct')
    self.assertEqual(correct_map.get_correctness('1_2_9'), 'correct')
    self.assertEqual(correct_map.get_correctness('1_2_11'), 'incorrect')
    self.assertEqual(correct_map.get_msg('1_2_1'), '1')
    self.assertEqual(correct_map.get_msg('1_2_9'), '9')
    self.assertEqual(correct_map.get_msg('1_2_11'), '11')
class SchematicResponseTest(ResponseTest):
    """
    Class containing setup and tests for Schematic responsetype.
    """
    xml_factory_class = SchematicResponseXMLFactory

    def test_grade(self):
        # Most of the schematic-specific work is handled elsewhere
        # (in client-side JavaScript)
        # The <schematicresponse> is responsible only for executing the
        # Python code in <answer> with *submission* (list)
        # in the global context.

        # To test that the context is set up correctly,
        # we create a script that sets *correct* to true
        # if and only if we find the *submission* (list)
        script = "correct = ['correct' if 'test' in submission[0] else 'incorrect']"
        problem = self.build_problem(answer=script)

        # The actual dictionary would contain schematic information
        # sent from the JavaScript simulation; it is JSON-encoded the same
        # way the client would encode it.
        submission_dict = {'test': 'the_answer'}
        input_dict = {'1_2_1': json.dumps(submission_dict)}
        correct_map = problem.grade_answers(input_dict)

        # Expect that the problem is graded as true
        # (That is, our script verifies that the context
        # is what we expect)
        self.assertEqual(correct_map.get_correctness('1_2_1'), 'correct')

    def test_check_function_randomization(self):
        # The check function should get a random seed from the problem:
        # the script only grades 'correct' if the sandbox's random number
        # matches the one derived from problem.seed.
        script = "correct = ['correct' if (submission[0]['num'] == {code}) else 'incorrect']".format(code=self._get_random_number_code())
        problem = self.build_problem(answer=script)
        submission_dict = {'num': self._get_random_number_result(problem.seed)}
        input_dict = {'1_2_1': json.dumps(submission_dict)}
        correct_map = problem.grade_answers(input_dict)
        self.assertEqual(correct_map.get_correctness('1_2_1'), 'correct')

    def test_script_exception(self):
        # Construct a script that will raise an exception
        script = "raise Exception('test')"
        problem = self.build_problem(answer=script)

        # Expect that an exception gets raised when we check the answer
        with self.assertRaises(ResponseError):
            submission_dict = {'test': 'test'}
            input_dict = {'1_2_1': json.dumps(submission_dict)}
            problem.grade_answers(input_dict)
class AnnotationResponseTest(ResponseTest):
    """Tests for the <annotationresponse> grader."""
    xml_factory_class = AnnotationResponseXMLFactory

    def test_grade(self):
        """Each option carries its own correctness and point value; anything
        other than exactly one valid chosen option scores zero."""
        (correct, partially, incorrect) = ('correct', 'partially-correct', 'incorrect')
        answer_id = '1_2_1'
        options = (('x', correct), ('y', partially), ('z', incorrect))

        # Replaced `make_answer = lambda ...` with a def (PEP 8 E731:
        # don't bind a lambda to a name).
        def make_answer(option_ids):
            # Submissions arrive JSON-encoded, keyed by the input's answer id.
            return {answer_id: json.dumps({'options': option_ids})}

        # (expected correctness, expected points, submission) triples,
        # including degenerate submissions: multiple options, empty list,
        # empty string, None, and raw JSON null.
        tests = [
            {'correctness': correct, 'points': 2, 'answers': make_answer([0])},
            {'correctness': partially, 'points': 1, 'answers': make_answer([1])},
            {'correctness': incorrect, 'points': 0, 'answers': make_answer([2])},
            {'correctness': incorrect, 'points': 0, 'answers': make_answer([0, 1, 2])},
            {'correctness': incorrect, 'points': 0, 'answers': make_answer([])},
            {'correctness': incorrect, 'points': 0, 'answers': make_answer('')},
            {'correctness': incorrect, 'points': 0, 'answers': make_answer(None)},
            {'correctness': incorrect, 'points': 0, 'answers': {answer_id: 'null'}},
        ]
        for test in tests:
            expected_correctness = test['correctness']
            expected_points = test['points']
            answers = test['answers']
            problem = self.build_problem(options=options)
            correct_map = problem.grade_answers(answers)
            actual_correctness = correct_map.get_correctness(answer_id)
            actual_points = correct_map.get_npoints(answer_id)
            self.assertEqual(expected_correctness, actual_correctness,
                             msg="%s should be marked %s" % (answer_id, expected_correctness))
            self.assertEqual(expected_points, actual_points,
                             msg="%s should have %d points" % (answer_id, expected_points))
class ChoiceTextResponseTest(ResponseTest):
    """
    Class containing setup and tests for ChoiceText responsetype.
    """
    xml_factory_class = ChoiceTextResponseXMLFactory

    # `TEST_INPUTS` is a dictionary mapping from
    # test_name to a representation of inputs for a test problem.
    # Each value is a list of (choice_selected, [textinput_answers]) pairs,
    # one pair per choice, in choice order.
    TEST_INPUTS = {
        "1_choice_0_input_correct": [(True, [])],
        "1_choice_0_input_incorrect": [(False, [])],
        "1_choice_0_input_invalid_choice": [(False, []), (True, [])],
        "1_choice_1_input_correct": [(True, ["123"])],
        "1_input_script_correct": [(True, ["2"])],
        "1_input_script_incorrect": [(True, ["3.25"])],
        "1_choice_2_inputs_correct": [(True, ["123", "456"])],
        "1_choice_2_inputs_tolerance": [(True, ["123 + .5", "456 + 9"])],
        "1_choice_2_inputs_1_wrong": [(True, ["0", "456"])],
        "1_choice_2_inputs_both_wrong": [(True, ["0", "0"])],
        "1_choice_2_inputs_inputs_blank": [(True, ["", ""])],
        "1_choice_2_inputs_empty": [(False, [])],
        "1_choice_2_inputs_fail_tolerance": [(True, ["123 + 1.5", "456 + 9"])],
        "1_choice_1_input_within_tolerance": [(True, ["122.5"])],
        "1_choice_1_input_answer_incorrect": [(True, ["345"])],
        "1_choice_1_input_choice_incorrect": [(False, ["123"])],
        "2_choices_0_inputs_correct": [(False, []), (True, [])],
        "2_choices_0_inputs_incorrect": [(True, []), (False, [])],
        "2_choices_0_inputs_blank": [(False, []), (False, [])],
        "2_choices_1_input_1_correct": [(False, []), (True, ["123"])],
        "2_choices_1_input_1_incorrect": [(True, []), (False, ["123"])],
        "2_choices_1_input_input_wrong": [(False, []), (True, ["321"])],
        "2_choices_1_input_1_blank": [(False, []), (False, [])],
        "2_choices_1_input_2_correct": [(True, []), (False, ["123"])],
        "2_choices_1_input_2_incorrect": [(False, []), (True, ["123"])],
        "2_choices_2_inputs_correct": [(True, ["123"]), (False, [])],
        "2_choices_2_inputs_wrong_choice": [(False, ["123"]), (True, [])],
        "2_choices_2_inputs_wrong_input": [(True, ["321"]), (False, [])]
    }

    # `TEST_SCENARIOS` is a dictionary of the form
    # {test_name: (test_problem_name, correctness)}
    # (comment fixed: previously had a stray quote in the schema line).
    # correctness represents whether the problem should be graded as
    # correct or incorrect when the test is run.
    TEST_SCENARIOS = {
        "1_choice_0_input_correct": ("1_choice_0_input", "correct"),
        "1_choice_0_input_incorrect": ("1_choice_0_input", "incorrect"),
        "1_choice_0_input_invalid_choice": ("1_choice_0_input", "incorrect"),
        "1_input_script_correct": ("1_input_script", "correct"),
        "1_input_script_incorrect": ("1_input_script", "incorrect"),
        "1_choice_2_inputs_correct": ("1_choice_2_inputs", "correct"),
        "1_choice_2_inputs_tolerance": ("1_choice_2_inputs", "correct"),
        "1_choice_2_inputs_1_wrong": ("1_choice_2_inputs", "incorrect"),
        "1_choice_2_inputs_both_wrong": ("1_choice_2_inputs", "incorrect"),
        "1_choice_2_inputs_inputs_blank": ("1_choice_2_inputs", "incorrect"),
        "1_choice_2_inputs_empty": ("1_choice_2_inputs", "incorrect"),
        "1_choice_2_inputs_fail_tolerance": ("1_choice_2_inputs", "incorrect"),
        "1_choice_1_input_correct": ("1_choice_1_input", "correct"),
        "1_choice_1_input_within_tolerance": ("1_choice_1_input", "correct"),
        "1_choice_1_input_answer_incorrect": ("1_choice_1_input", "incorrect"),
        "1_choice_1_input_choice_incorrect": ("1_choice_1_input", "incorrect"),
        "2_choices_0_inputs_correct": ("2_choices_0_inputs", "correct"),
        "2_choices_0_inputs_incorrect": ("2_choices_0_inputs", "incorrect"),
        "2_choices_0_inputs_blank": ("2_choices_0_inputs", "incorrect"),
        "2_choices_1_input_1_correct": ("2_choices_1_input_1", "correct"),
        "2_choices_1_input_1_incorrect": ("2_choices_1_input_1", "incorrect"),
        "2_choices_1_input_input_wrong": ("2_choices_1_input_1", "incorrect"),
        "2_choices_1_input_1_blank": ("2_choices_1_input_1", "incorrect"),
        "2_choices_1_input_2_correct": ("2_choices_1_input_2", "correct"),
        "2_choices_1_input_2_incorrect": ("2_choices_1_input_2", "incorrect"),
        "2_choices_2_inputs_correct": ("2_choices_2_inputs", "correct"),
        "2_choices_2_inputs_wrong_choice": ("2_choices_2_inputs", "incorrect"),
        "2_choices_2_inputs_wrong_input": ("2_choices_2_inputs", "incorrect")
    }

    # Dictionary that maps from problem_name to arguments for
    # _make_problem, that will create the problem.
    TEST_PROBLEM_ARGS = {
        "1_choice_0_input": {"choices": ("true", {}), "script": ''},
        "1_choice_1_input": {
            "choices": ("true", {"answer": "123", "tolerance": "1"}),
            "script": ''
        },
        "1_input_script": {
            "choices": ("true", {"answer": "$computed_response", "tolerance": "1"}),
            "script": "computed_response = math.sqrt(4)"
        },
        "1_choice_2_inputs": {
            "choices": [
                (
                    "true", (
                        {"answer": "123", "tolerance": "1"},
                        {"answer": "456", "tolerance": "10"}
                    )
                )
            ],
            "script": ''
        },
        "2_choices_0_inputs": {
            "choices": [("false", {}), ("true", {})],
            "script": ''
        },
        "2_choices_1_input_1": {
            "choices": [
                ("false", {}), ("true", {"answer": "123", "tolerance": "0"})
            ],
            "script": ''
        },
        "2_choices_1_input_2": {
            "choices": [("true", {}), ("false", {"answer": "123", "tolerance": "0"})],
            "script": ''
        },
        "2_choices_2_inputs": {
            "choices": [
                ("true", {"answer": "123", "tolerance": "0"}),
                ("false", {"answer": "999", "tolerance": "0"})
            ],
            "script": ''
        }
    }
def _make_problem(self, choices, in_type='radiotextgroup', script=''):
    """
    Convenience wrapper around self.build_problem that defaults the
    input-tag type and the setup script.
    """
    return self.build_problem(choices=choices, type=in_type, script=script)

def _make_answer_dict(self, choice_list):
    """
    Turn an iterable of (selected, [answers...]) pairs -- one pair per
    choice, in choice order -- into the flat submission dict the grader
    expects.

    A selected choice produces a radio/checkbox entry whose id ends in
    "bc" (the naming convention for choicetext problems); every textinput
    answer produces a numtolerance_input entry, whether or not its parent
    choice was selected.
    """
    submission = {}
    for index, (selected, answers) in enumerate(choice_list):
        if selected:
            # Radio/Checkbox inputs in choicetext problems follow
            # a naming convention that gives them names ending with "bc".
            choice_id = "1_2_1_choiceinput_{index}bc".format(index=index)
            submission[choice_id] = "choiceinput_{index}".format(index=index)
        # numtolerance_input answers are recorded even for unselected
        # choices (unselected inputs must not be validated at grading).
        for ind, answer in enumerate(answers):
            # `index` is the ordinality of the choice; `ind` is the
            # ordinality of the numtolerance_input inside that choice.
            answer_id = "1_2_1_choiceinput_{index}_numtolerance_input_{ind}".format(
                index=index,
                ind=ind
            )
            submission[answer_id] = answer
    return submission
def test_invalid_xml(self):
    """An unknown input-tag type must make problem construction fail."""
    with self.assertRaises(Exception):
        self.build_problem(type="invalidtextgroup")

def test_unchecked_input_not_validated(self):
    """A non-numeric entry inside an *unselected* choice must not raise
    when the problem is checked -- only selected choices are validated."""
    problem = self._make_problem(
        [
            ("true", {"answer": "123", "tolerance": "1"}),
            ("false", {})
        ],
        "checkboxtextgroup"
    )
    # Wrong choice is selected, and the unselected one carries a
    # non-numeric answer: graded incorrect, but no error is raised.
    submission = self._make_answer_dict([(True, ["1"]), (False, ["Platypus"])])
    self.assert_grade(problem, submission, "incorrect")
def test_interpret_error(self):
    """
    Test that student answers that cannot be interpreted as numbers
    cause the response type to raise an error.
    (docstring typo fixed: "interpeted")
    """
    two_choice_two_input = self._make_problem(
        [
            ("true", {"answer": "123", "tolerance": "1"}),
            ("false", {})
        ],
        "checkboxtextgroup"
    )

    with self.assertRaisesRegexp(StudentInputError, "Could not interpret"):
        # Test that error is raised for input in selected correct choice.
        self.assert_grade(
            two_choice_two_input,
            self._make_answer_dict([(True, ["Platypus"])]),
            "correct"
        )

    with self.assertRaisesRegexp(StudentInputError, "Could not interpret"):
        # Test that error is raised for input in selected incorrect choice.
        self.assert_grade(
            two_choice_two_input,
            self._make_answer_dict([(True, ["1"]), (True, ["Platypus"])]),
            "correct"
        )

def test_staff_answer_error(self):
    # Both choices carry non-numeric *staff* answers, so grading any
    # selected choice must fail with a staff-facing error rather than
    # blaming the student submission.
    broken_problem = self._make_problem(
        [("true", {"answer": "Platypus", "tolerance": "0"}),
         ("true", {"answer": "edX", "tolerance": "0"})
         ],
        "checkboxtextgroup"
    )

    with self.assertRaisesRegexp(
        StudentInputError,
        "The Staff answer could not be interpreted as a number."
    ):
        self.assert_grade(
            broken_problem,
            self._make_answer_dict(
                [(True, ["1"]), (True, ["1"])]
            ),
            "correct"
        )
def test_radio_grades(self):
    """
    Test that confirms correct operation of grading when the inputtag is
    radiotextgroup.

    Drives every scenario in TEST_INPUTS through the problem described by
    TEST_SCENARIOS / TEST_PROBLEM_ARGS.
    """
    for name, inputs in self.TEST_INPUTS.iteritems():
        # Turn submission into the form expected when grading this problem.
        submission = self._make_answer_dict(inputs)
        # Lookup the problem_name, and whether this test problem
        # and inputs should be graded as correct or incorrect.
        problem_name, correctness = self.TEST_SCENARIOS[name]
        # Load the args needed to build the problem for this test.
        problem_args = self.TEST_PROBLEM_ARGS[problem_name]
        test_choices = problem_args["choices"]
        test_script = problem_args["script"]
        # Build the actual problem for the test.
        test_problem = self._make_problem(test_choices, 'radiotextgroup', test_script)
        # Make sure the actual grade matches the expected grade.
        self.assert_grade(
            test_problem,
            submission,
            correctness,
            msg="{0} should be {1}".format(
                name,
                correctness
            )
        )
def test_checkbox_grades(self):
    """
    Test that confirms correct operation of grading when the inputtag is
    checkboxtextgroup.
    """
    # Dictionary from name of test_scenario to (problem_name, correctness)
    # Correctness is used to test whether the problem was graded properly
    scenarios = {
        "2_choices_correct": ("checkbox_two_choices", "correct"),
        "2_choices_incorrect": ("checkbox_two_choices", "incorrect"),
        "2_choices_2_inputs_correct": (
            "checkbox_2_choices_2_inputs",
            "correct"
        ),
        "2_choices_2_inputs_missing_choice": (
            "checkbox_2_choices_2_inputs",
            "incorrect"
        ),
        "2_choices_2_inputs_wrong_input": (
            "checkbox_2_choices_2_inputs",
            "incorrect"
        )
    }
    # Dictionary scenario_name: test inputs.
    # Renamed from `inputs`: the old name was rebound by the loop below
    # while this very dict was being iterated, shadowing it.
    scenario_inputs = {
        "2_choices_correct": [(True, []), (True, [])],
        "2_choices_incorrect": [(True, []), (False, [])],
        "2_choices_2_inputs_correct": [(True, ["123"]), (True, ["456"])],
        "2_choices_2_inputs_missing_choice": [
            (True, ["123"]), (False, ["456"])
        ],
        "2_choices_2_inputs_wrong_input": [
            (True, ["123"]), (True, ["654"])
        ]
    }
    # Two choice zero input problem with both choices being correct.
    checkbox_two_choices = self._make_problem(
        [("true", {}), ("true", {})], "checkboxtextgroup"
    )
    # Two choice two input problem with both choices correct.
    checkbox_two_choices_two_inputs = self._make_problem(
        [("true", {"answer": "123", "tolerance": "0"}),
         ("true", {"answer": "456", "tolerance": "0"})
         ],
        "checkboxtextgroup"
    )
    # Dictionary problem_name: problem
    problems = {
        "checkbox_two_choices": checkbox_two_choices,
        "checkbox_2_choices_2_inputs": checkbox_two_choices_two_inputs
    }

    for name, inputs in scenario_inputs.iteritems():
        submission = self._make_answer_dict(inputs)
        # Load the test problem's name and desired correctness
        problem_name, correctness = scenarios[name]
        # Load the problem
        problem = problems[problem_name]
        # Make sure the actual grade matches the expected grade
        self.assert_grade(
            problem,
            submission,
            correctness,
            msg="{0} should be {1}".format(name, correctness)
        )
| agpl-3.0 |
revmischa/boto | boto/rds/regioninfo.py | 167 | 1513 | # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo
class RDSRegionInfo(RegionInfo):
    """RegionInfo specialization that always connects via RDSConnection."""

    def __init__(self, connection=None, name=None, endpoint=None,
                 connection_cls=None):
        # Imported here (not at module top) to avoid a circular import
        # between boto.rds and this module at load time.
        from boto.rds import RDSConnection
        # NOTE(review): `connection_cls` is accepted but ignored -- the
        # superclass is always handed RDSConnection. Presumably intentional
        # (forcing the RDS connection class); confirm before passing a
        # custom connection class here.
        super(RDSRegionInfo, self).__init__(connection, name, endpoint,
                                            RDSConnection)
| mit |
ChaoSBYNN/Tools | Data Mining/Spark Examples/src/main/python/streaming/queue_stream.py | 150 | 1763 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Create a queue of RDDs that will be mapped/reduced one at a time in
1 second intervals.
To run this example use
`$ bin/spark-submit examples/src/main/python/streaming/queue_stream.py
"""
import time
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
if __name__ == "__main__":
    sc = SparkContext(appName="PythonStreamingQueueStream")
    ssc = StreamingContext(sc, 1)

    # Queue five RDDs of the numbers 1..1000 (10 partitions each); the
    # QueueInputDStream consumes one queued RDD per 1-second batch.
    rddQueue = [
        ssc.sparkContext.parallelize([j for j in range(1, 1001)], 10)
        for _ in range(5)
    ]

    # Count occurrences of each final digit within every batch and print.
    inputStream = ssc.queueStream(rddQueue)
    reducedStream = (inputStream
                     .map(lambda x: (x % 10, 1))
                     .reduceByKey(lambda a, b: a + b))
    reducedStream.pprint()

    ssc.start()
    time.sleep(6)
    ssc.stop(stopSparkContext=True, stopGraceFully=True)
| gpl-3.0 |
edunham/servo | tests/wpt/web-platform-tests/tools/pytest/testing/test_assertion.py | 170 | 19078 | # -*- coding: utf-8 -*-
import sys
import textwrap
import _pytest.assertion as plugin
import _pytest._code
import py
import pytest
from _pytest.assertion import reinterpret
from _pytest.assertion import util
# True when running under Python 3; several expected reprs below differ by version.
PY3 = sys.version_info >= (3, 0)
@pytest.fixture
def mock_config():
    """Minimal stand-in for a pytest Config: only the 'verbose' option is
    supported; any other option name raises KeyError."""
    class Config(object):
        verbose = False

        def getoption(self, name):
            if name == 'verbose':
                return self.verbose
            raise KeyError('Not mocked out: %s' % name)
    return Config()


def interpret(expr):
    # Re-evaluate `expr` in the *caller's* frame (sys._getframe(1)), so the
    # assertion reinterpreter can see the calling test's local names.
    return reinterpret.reinterpret(expr, _pytest._code.Frame(sys._getframe(1)))
class TestBinReprIntegration:
    """The pytest_assertrepr_compare hook must be consulted on failing compares."""

    def test_pytest_assertrepr_compare_called(self, testdir):
        # The conftest hook records every (op, left, right) it is asked to
        # explain; the second test then checks exactly one failing '=='
        # comparison was seen.
        testdir.makeconftest("""
            l = []
            def pytest_assertrepr_compare(op, left, right):
                l.append((op, left, right))
            def pytest_funcarg__l(request):
                return l
        """)
        testdir.makepyfile("""
            def test_hello():
                assert 0 == 1
            def test_check(l):
                assert l == [("==", 0, 1)]
        """)
        result = testdir.runpytest("-v")
        result.stdout.fnmatch_lines([
            "*test_hello*FAIL*",
            "*test_check*PASS*",
        ])
def callequal(left, right, verbose=False):
    """Invoke the '==' compare-repr hook under a mock config."""
    cfg = mock_config()
    cfg.verbose = verbose
    return plugin.pytest_assertrepr_compare(cfg, '==', left, right)
class TestAssert_reprcompare:
def test_different_types(self):
    # No explanation is offered when the operand types differ.
    assert callequal([0, 1], 'foo') is None

def test_summary(self):
    # The one-line summary must stay short enough for a terminal.
    assert len(callequal([0, 1], [0, 2])[0]) < 65

def test_text_diff(self):
    explanation = callequal('spam', 'eggs')[1:]
    assert '- spam' in explanation
    assert '+ eggs' in explanation

def test_text_skipping(self):
    # Long identical prefixes are elided with a "Skipping" note...
    lines = callequal('a' * 50 + 'spam', 'a' * 50 + 'eggs')
    assert 'Skipping' in lines[1]
    # ...and the skipped run must not appear anywhere in the output.
    assert all('a' * 50 not in line for line in lines)

def test_text_skipping_verbose(self):
    # With verbose on, nothing is skipped.
    lines = callequal('a' * 50 + 'spam', 'a' * 50 + 'eggs', verbose=True)
    assert '- ' + 'a' * 50 + 'spam' in lines
    assert '+ ' + 'a' * 50 + 'eggs' in lines

def test_multiline_text_diff(self):
    explanation = callequal('foo\nspam\nbar', 'foo\neggs\nbar')
    assert '- spam' in explanation
    assert '+ eggs' in explanation

def test_list(self):
    assert len(callequal([0, 1], [0, 2])) > 1
@pytest.mark.parametrize(
['left', 'right', 'expected'], [
([0, 1], [0, 2], """
Full diff:
- [0, 1]
? ^
+ [0, 2]
? ^
"""),
({0: 1}, {0: 2}, """
Full diff:
- {0: 1}
? ^
+ {0: 2}
? ^
"""),
(set([0, 1]), set([0, 2]), """
Full diff:
- set([0, 1])
? ^
+ set([0, 2])
? ^
""" if not PY3 else """
Full diff:
- {0, 1}
? ^
+ {0, 2}
? ^
""")
]
)
def test_iterable_full_diff(self, left, right, expected):
"""Test the full diff assertion failure explanation.
When verbose is False, then just a -v notice to get the diff is rendered,
when verbose is True, then ndiff of the pprint is returned.
"""
expl = callequal(left, right, verbose=False)
assert expl[-1] == 'Use -v to get the full diff'
expl = '\n'.join(callequal(left, right, verbose=True))
assert expl.endswith(textwrap.dedent(expected).strip())
def test_list_different_lenghts(self):
expl = callequal([0, 1], [0, 1, 2])
assert len(expl) > 1
expl = callequal([0, 1, 2], [0, 1])
assert len(expl) > 1
def test_dict(self):
expl = callequal({'a': 0}, {'a': 1})
assert len(expl) > 1
def test_dict_omitting(self):
lines = callequal({'a': 0, 'b': 1}, {'a': 1, 'b': 1})
assert lines[1].startswith('Omitting 1 identical item')
assert 'Common items' not in lines
for line in lines[1:]:
assert 'b' not in line
def test_dict_omitting_verbose(self):
lines = callequal({'a': 0, 'b': 1}, {'a': 1, 'b': 1}, verbose=True)
assert lines[1].startswith('Common items:')
assert 'Omitting' not in lines[1]
assert lines[2] == "{'b': 1}"
def test_set(self):
expl = callequal(set([0, 1]), set([0, 2]))
assert len(expl) > 1
def test_frozenzet(self):
expl = callequal(frozenset([0, 1]), set([0, 2]))
assert len(expl) > 1
def test_Sequence(self):
col = py.builtin._tryimport(
"collections.abc",
"collections",
"sys")
if not hasattr(col, "MutableSequence"):
pytest.skip("cannot import MutableSequence")
MutableSequence = col.MutableSequence
class TestSequence(MutableSequence): # works with a Sequence subclass
def __init__(self, iterable):
self.elements = list(iterable)
def __getitem__(self, item):
return self.elements[item]
def __len__(self):
return len(self.elements)
def __setitem__(self, item, value):
pass
def __delitem__(self, item):
pass
def insert(self, item, index):
pass
expl = callequal(TestSequence([0, 1]), list([0, 2]))
assert len(expl) > 1
def test_list_tuples(self):
expl = callequal([], [(1,2)])
assert len(expl) > 1
expl = callequal([(1,2)], [])
assert len(expl) > 1
def test_list_bad_repr(self):
class A:
def __repr__(self):
raise ValueError(42)
expl = callequal([], [A()])
assert 'ValueError' in "".join(expl)
expl = callequal({}, {'1': A()})
assert 'faulty' in "".join(expl)
def test_one_repr_empty(self):
"""
the faulty empty string repr did trigger
a unbound local error in _diff_text
"""
class A(str):
def __repr__(self):
return ''
expl = callequal(A(), '')
assert not expl
def test_repr_no_exc(self):
expl = ' '.join(callequal('foo', 'bar'))
assert 'raised in repr()' not in expl
def test_unicode(self):
left = py.builtin._totext('£€', 'utf-8')
right = py.builtin._totext('£', 'utf-8')
expl = callequal(left, right)
assert expl[0] == py.builtin._totext("'£€' == '£'", 'utf-8')
assert expl[1] == py.builtin._totext('- £€', 'utf-8')
assert expl[2] == py.builtin._totext('+ £', 'utf-8')
def test_nonascii_text(self):
"""
:issue: 877
non ascii python2 str caused a UnicodeDecodeError
"""
class A(str):
def __repr__(self):
return '\xff'
expl = callequal(A(), '1')
assert expl
def test_format_nonascii_explanation(self):
assert util.format_explanation('λ')
def test_mojibake(self):
# issue 429
left = 'e'
right = '\xc3\xa9'
if not isinstance(left, py.builtin.bytes):
left = py.builtin.bytes(left, 'utf-8')
right = py.builtin.bytes(right, 'utf-8')
expl = callequal(left, right)
for line in expl:
assert isinstance(line, py.builtin.text)
msg = py.builtin._totext('\n').join(expl)
assert msg
class TestFormatExplanation:
    """Tests for util.format_explanation, which renders the nested
    {...}/~/> explanation mini-language into '+ where'/'+ and' lines."""
    def test_special_chars_full(self, testdir):
        # Issue 453, for the bug this would raise IndexError
        testdir.makepyfile("""
            def test_foo():
                assert '\\n}' == ''
        """)
        result = testdir.runpytest()
        assert result.ret == 1
        result.stdout.fnmatch_lines([
            "*AssertionError*",
        ])
    def test_fmt_simple(self):
        expl = 'assert foo'
        assert util.format_explanation(expl) == 'assert foo'
    def test_fmt_where(self):
        # '{...}' introduces a sub-explanation rendered as '+ where'.
        expl = '\n'.join(['assert 1',
                          '{1 = foo',
                          '} == 2'])
        res = '\n'.join(['assert 1 == 2',
                         ' +  where 1 = foo'])
        assert util.format_explanation(expl) == res
    def test_fmt_and(self):
        expl = '\n'.join(['assert 1',
                          '{1 = foo',
                          '} == 2',
                          '{2 = bar',
                          '}'])
        res = '\n'.join(['assert 1 == 2',
                         ' +  where 1 = foo',
                         ' +  and   2 = bar'])
        assert util.format_explanation(expl) == res
    def test_fmt_where_nested(self):
        expl = '\n'.join(['assert 1',
                          '{1 = foo',
                          '{foo = bar',
                          '}',
                          '} == 2'])
        res = '\n'.join(['assert 1 == 2',
                         ' +  where 1 = foo',
                         ' +    where foo = bar'])
        assert util.format_explanation(expl) == res
    def test_fmt_newline(self):
        # '~' marks an escaped newline continuation.
        expl = '\n'.join(['assert "foo" == "bar"',
                          '~- foo',
                          '~+ bar'])
        res = '\n'.join(['assert "foo" == "bar"',
                        '  - foo',
                        '  + bar'])
        assert util.format_explanation(expl) == res
    def test_fmt_newline_escaped(self):
        # Unmarked newlines are re-escaped into a single line.
        expl = '\n'.join(['assert foo == bar',
                          'baz'])
        res = 'assert foo == bar\\nbaz'
        assert util.format_explanation(expl) == res
    def test_fmt_newline_before_where(self):
        # '>' separates a custom assertion message from the assert itself.
        expl = '\n'.join(['the assertion message here',
                          '>assert 1',
                          '{1 = foo',
                          '} == 2',
                          '{2 = bar',
                          '}'])
        res = '\n'.join(['the assertion message here',
                         'assert 1 == 2',
                         ' +  where 1 = foo',
                         ' +  and   2 = bar'])
        assert util.format_explanation(expl) == res
    def test_fmt_multi_newline_before_where(self):
        expl = '\n'.join(['the assertion',
                          '~message here',
                          '>assert 1',
                          '{1 = foo',
                          '} == 2',
                          '{2 = bar',
                          '}'])
        res = '\n'.join(['the assertion',
                         '  message here',
                         'assert 1 == 2',
                         ' +  where 1 = foo',
                         ' +  and   2 = bar'])
        assert util.format_explanation(expl) == res
def test_python25_compile_issue257(testdir):
    # Regression test: rewriting a module whose last line is a comment
    # must still compile and report the failure normally.
    testdir.makepyfile("""
        def test_rewritten():
            assert 1 == 2
        # some comment
    """)
    result = testdir.runpytest()
    assert result.ret == 1
    result.stdout.fnmatch_lines("""
            *E*assert 1 == 2*
            *1 failed*
    """)
def test_rewritten(testdir):
    # Assertion rewriting injects @py_builtins into the module globals;
    # its presence proves the module was rewritten.
    testdir.makepyfile("""
        def test_rewritten():
            assert "@py_builtins" in globals()
    """)
    assert testdir.runpytest().ret == 0
def test_reprcompare_notin(mock_config):
    # 'not in' failures show where the needle *is* contained.
    detail = plugin.pytest_assertrepr_compare(
        mock_config, 'not in', 'foo', 'aaafoobbb')[1:]
    assert detail == ["'foo' is contained here:", '  aaafoobbb', '?    +++']
def test_pytest_assertrepr_compare_integration(testdir):
    # End-to-end: a failing set comparison reports the extra element.
    testdir.makepyfile("""
        def test_hello():
            x = set(range(100))
            y = x.copy()
            y.remove(50)
            assert x == y
    """)
    result = testdir.runpytest()
    result.stdout.fnmatch_lines([
        "*def test_hello():*",
        "*assert x == y*",
        "*E*Extra items*left*",
        "*E*50*",
    ])
def test_sequence_comparison_uses_repr(testdir):
    # Set elements must be shown via repr() (quoted), not str().
    testdir.makepyfile("""
        def test_hello():
            x = set("hello x")
            y = set("hello y")
            assert x == y
    """)
    result = testdir.runpytest()
    result.stdout.fnmatch_lines([
        "*def test_hello():*",
        "*assert x == y*",
        "*E*Extra items*left*",
        "*E*'x'*",
        "*E*Extra items*right*",
        "*E*'y'*",
    ])
def test_assert_compare_truncate_longmessage(monkeypatch, testdir):
    # Long diffs are truncated by default but shown in full with -vv
    # or when running on CI (detected via the CI environment variable).
    testdir.makepyfile(r"""
        def test_long():
            a = list(range(200))
            b = a[::2]
            a = '\n'.join(map(str, a))
            b = '\n'.join(map(str, b))
            assert a == b
    """)
    monkeypatch.delenv('CI', raising=False)
    result = testdir.runpytest()
    # without -vv, truncate the message showing a few diff lines only
    result.stdout.fnmatch_lines([
        "*- 1",
        "*- 3",
        "*- 5",
        "*- 7",
        "*truncated (191 more lines)*use*-vv*",
    ])
    result = testdir.runpytest('-vv')
    result.stdout.fnmatch_lines([
        "*- 197",
    ])
    monkeypatch.setenv('CI', '1')
    result = testdir.runpytest()
    result.stdout.fnmatch_lines([
        "*- 197",
    ])
def test_assertrepr_loaded_per_dir(testdir):
    # Each directory's conftest pytest_assertrepr_compare hook applies only
    # to tests in that directory; the base test uses the default repr.
    testdir.makepyfile(test_base=['def test_base(): assert 1 == 2'])
    a = testdir.mkdir('a')
    a_test = a.join('test_a.py')
    a_test.write('def test_a(): assert 1 == 2')
    a_conftest = a.join('conftest.py')
    a_conftest.write('def pytest_assertrepr_compare(): return ["summary a"]')
    b = testdir.mkdir('b')
    b_test = b.join('test_b.py')
    b_test.write('def test_b(): assert 1 == 2')
    b_conftest = b.join('conftest.py')
    b_conftest.write('def pytest_assertrepr_compare(): return ["summary b"]')
    result = testdir.runpytest()
    result.stdout.fnmatch_lines([
            '*def test_base():*',
            '*E*assert 1 == 2*',
            '*def test_a():*',
            '*E*assert summary a*',
            '*def test_b():*',
            '*E*assert summary b*'])
def test_assertion_options(testdir):
    # Every combination of the assertion-disabling flags must suppress
    # the detailed "3 == 4" introspection output.
    testdir.makepyfile("""
        def test_hello():
            x = 3
            assert x == 4
    """)
    result = testdir.runpytest()
    assert "3 == 4" in result.stdout.str()
    off_options = (("--no-assert",),
                   ("--nomagic",),
                   ("--no-assert", "--nomagic"),
                   ("--assert=plain",),
                   ("--assert=plain", "--no-assert"),
                   ("--assert=plain", "--nomagic"),
                   ("--assert=plain", "--no-assert", "--nomagic"))
    for opt in off_options:
        result = testdir.runpytest_subprocess(*opt)
        assert "3 == 4" not in result.stdout.str()
def test_old_assert_mode(testdir):
    # --assert=reinterp must not rewrite modules (no @py_builtins injected).
    testdir.makepyfile("""
        def test_in_old_mode():
            assert "@py_builtins" not in globals()
    """)
    result = testdir.runpytest_subprocess("--assert=reinterp")
    assert result.ret == 0
def test_triple_quoted_string_issue113(testdir):
    # A triple-quoted string in the assert must not break reinterpretation
    # with a SyntaxError (issue 113).
    testdir.makepyfile("""
        def test_hello():
            assert "" == '''
    '''""")
    result = testdir.runpytest("--fulltrace")
    result.stdout.fnmatch_lines([
        "*1 failed*",
    ])
    assert 'SyntaxError' not in result.stdout.str()
def test_traceback_failure(testdir):
    # The failing frame chain must be shown both with --tb=long and with
    # the default ("auto") traceback style.
    p1 = testdir.makepyfile("""
        def g():
            return 2
        def f(x):
            assert x == g()
        def test_onefails():
            f(3)
    """)
    result = testdir.runpytest(p1, "--tb=long")
    result.stdout.fnmatch_lines([
        "*test_traceback_failure.py F",
        "====* FAILURES *====",
        "____*____",
        "",
        "    def test_onefails():",
        ">       f(3)",
        "",
        "*test_*.py:6: ",
        "_ _ _ *",
        #"",
        "    def f(x):",
        ">       assert x == g()",
        "E       assert 3 == 2",
        "E        +  where 2 = g()",
        "",
        "*test_traceback_failure.py:4: AssertionError"
    ])
    result = testdir.runpytest(p1) # "auto"
    result.stdout.fnmatch_lines([
        "*test_traceback_failure.py F",
        "====* FAILURES *====",
        "____*____",
        "",
        "    def test_onefails():",
        ">       f(3)",
        "",
        "*test_*.py:6: ",
        "",
        "    def f(x):",
        ">       assert x == g()",
        "E       assert 3 == 2",
        "E        +  where 2 = g()",
        "",
        "*test_traceback_failure.py:4: AssertionError"
    ])
@pytest.mark.skipif("'__pypy__' in sys.builtin_module_names or sys.platform.startswith('java')" )
def test_warn_missing(testdir):
    # Under python -OO asserts are stripped, so pytest must warn that
    # assert statements will not be executed.
    testdir.makepyfile("")
    result = testdir.run(sys.executable, "-OO", "-m", "pytest", "-h")
    result.stderr.fnmatch_lines([
        "*WARNING*assert statements are not executed*",
    ])
    result = testdir.run(sys.executable, "-OO", "-m", "pytest", "--no-assert")
    result.stderr.fnmatch_lines([
        "*WARNING*assert statements are not executed*",
    ])
def test_recursion_source_decode(testdir):
    # Collecting with python_files = *.py (which matches conftest/ini files
    # too) must not blow up while decoding sources.
    testdir.makepyfile("""
        def test_something():
            pass
    """)
    testdir.makeini("""
        [pytest]
        python_files = *.py
    """)
    result = testdir.runpytest("--collect-only")
    result.stdout.fnmatch_lines("""
        <Module*>
    """)
def test_AssertionError_message(testdir):
    # The tuple message of `assert 0, (x,y)` must appear in the report.
    testdir.makepyfile("""
        def test_hello():
            x,y = 1,2
            assert 0, (x,y)
    """)
    result = testdir.runpytest()
    result.stdout.fnmatch_lines("""
        *def test_hello*
        *assert 0, (x,y)*
        *AssertionError: (1, 2)*
    """)
@pytest.mark.skipif(PY3, reason='This bug does not exist on PY3')
def test_set_with_unsortable_elements():
    # issue #718
    # Elements whose __lt__ raises must not break the set diff; the
    # explanation falls back to calling repr() on each item.
    class UnsortableKey(object):
        def __init__(self, name):
            self.name = name
        def __lt__(self, other):
            raise RuntimeError()
        def __repr__(self):
            return 'repr({0})'.format(self.name)
        def __eq__(self, other):
            return self.name == other.name
        def __hash__(self):
            return hash(self.name)
    left_set = set(UnsortableKey(str(i)) for i in range(1, 3))
    right_set = set(UnsortableKey(str(i)) for i in range(2, 4))
    expl = callequal(left_set, right_set, verbose=True)
    # skip first line because it contains the "construction" of the set, which does not have a guaranteed order
    expl = expl[1:]
    dedent = textwrap.dedent("""
        Extra items in the left set:
        repr(1)
        Extra items in the right set:
        repr(3)
        Full diff (fallback to calling repr on each item):
        - repr(1)
        repr(2)
        + repr(3)
    """).strip()
    assert '\n'.join(expl) == dedent
| mpl-2.0 |
wil/pyroman | pyroman/commands.py | 1 | 8269 | """
Commands to be used in pyroman rules files.
allow, reject, drop are just convenience commands, that can be replaced by
add_rule(Firewall.allow, ...) etc. but that are easier to read.
"""
#Copyright (c) 2011 Erich Schubert erich@debian.org
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
from pyroman import Firewall
from util import Util
from chain import Chain
from exception import PyromanException
import port, service, interface, host, nat, rule
def add_service(name, sports="", dports="", include=None):
    """
    Add a new named service to the list of services.

    name -- name of the new service
    sports -- source port specification like "www/tcp 53/udp"
    dports -- destination port specification
    include -- services to be included / aliased

    Note that services can be autocreated when names such as "www/tcp" or
    "53/udp" are used, so you mainly use this to group services or make easier
    aliases (e.g. "www" = "http/tcp https/tcp")
    """
    # Record the rules-file location of the caller for error reporting.
    loginfo = Util.get_callee(3)
    service.Service(name, sports, dports, include, loginfo)
def add_interface(name, iface):
    """
    Create a new named interface.

    name -- name for this interface (-group)
    iface -- kernel interfaces in this group, e.g. "eth0 eth1"
    """
    # Record the rules-file location of the caller for error reporting.
    loginfo = Util.get_callee(3)
    interface.Interface(name, iface, loginfo)
def add_host(name, ip, iface, hostname=None):
    """
    Register a host object with the firewall configuration.

    name -- Nickname for the host
    ip -- IP specification for host or subnet (e.g. "127.0.0.1 10.0.0.0/24")
    iface -- Interface nickname this is connected to (only one!)
    hostname -- Real hostname, as returned by "hostname". Used for
                "localhost" detection only (i.e. use INPUT and OUTPUT, not
                FORWARD chains), so only needed for these hosts. Defaults to
                the nickname, which will usually be fine. You can use
                hostname = Firewall.hostname to make e.g. a broadcast "host"
                always "local".
    """
    # Record the rules-file location of the caller for error reporting;
    # fall back to the nickname when no real hostname was given.
    caller = Util.get_callee(3)
    host.Host(name, ip, iface, hostname or name, caller)
def add_nat(client="", server=None, ip=None, port=None, dport=None, dir="in"):
    """
    Create a new NAT rule.

    client -- clients that may use this NAT
    server -- server to be accessed via this NAT
    ip -- IP that the NAT redirects/uses
    port -- Ports that are redirected by the NAT
    dport -- Destination port for the NAT
    dir -- set to "in", "out" or "both" for directions, default is "in"
           beware that "out" inverts client, server, to make more sense
           for hosts that aren't reachable from outside (i.e. NAT is
           applied to the client, not to the server, whereas in "in" and
           "both", it is always applied to the server)
    """
    loginfo = Util.get_callee(3)
    # Both the server and the NAT address are mandatory.
    if not server or not ip:
        raise PyromanException("Server not specified for NAT (server: %s, ip: %s) at %s" % (server, ip, loginfo))
    # special case: "out" NAT type
    # (swap roles so the NAT is applied to the client side; see docstring)
    if dir=="out":
        (client, server) = (server, client)
    Firewall.nats.append(nat.Nat(client, server, ip, port, dport, dir, loginfo))
def add_rule(target, server="", client="", service=""):
    """
    Add an arbitrary rule to the list of rules.
    Allow, reject, drop are special cases of this.

    target -- target for the rule
    server -- server host nickname
    client -- client host nickname
    service -- service this rule applies to
    """
    loginfo = Util.get_callee(4)
    if server == "" and client == "" and service == "":
        # NOTE(review): the message says "allow()" but this path is shared
        # by reject()/drop() too; loginfo still points at the real caller.
        raise PyromanException("allow() called without parameters at %s" % loginfo)
    # Expand whitespace-separated lists: one Rule per (server, client,
    # service) combination.
    for srv in Util.splitter.split(server):
        for cli in Util.splitter.split(client):
            for svc in Util.splitter.split(service):
                Firewall.rules.append(rule.Rule(target,srv,cli,svc,loginfo))
def add_chain(name, default="-", table="filter", id=None):
    """
    Create a new firewall chain.

    name -- name of the chain in iptables
    id -- internal ID for the chain, defaults to name
    default -- default target, use for built-in chains
    table -- table this chain resides in, defaults to "filter"

    Raises PyromanException if a chain with the same id already exists.
    """
    if not id:
        id = name
    # 'in' instead of the Python-2-only dict.has_key() -- same semantics,
    # works on Python 3 as well.
    if id in Firewall.chains:
        raise PyromanException("Firewall chain %s defined multiple times at %s" % (id, Util.get_callee(3)))
    loginfo = "Chain %s created by %s" % (name, Util.get_callee(3))
    Firewall.chains[id] = Chain(name, loginfo, default=default, table=table)
def allow(server="", client="", service=""):
    """
    Add an 'allow' rule to the list of rules.
    This calls add_rule(Firewall.accept, ...)

    server -- server host nickname
    client -- client host nickname
    service -- service this rule applies to
    """
    add_rule(Firewall.accept, server, client, service)
def reject(server="", client="", service=""):
    """
    Add a 'reject' rule to the list of rules.
    This calls add_rule(Firewall.reject, ...)

    server -- server host nickname
    client -- client host nickname
    service -- service this rule applies to
    """
    add_rule(Firewall.reject, server, client, service)
def drop(server="", client="", service=""):
    """
    Add a 'drop' rule to the list of rules.
    This calls add_rule(Firewall.drop, ...)

    server -- server host nickname
    client -- client host nickname
    service -- service this rule applies to
    """
    add_rule(Firewall.drop, server, client, service)
def iptables(chain, filter):
    """
    Add an arbitrary iptables (IPv4) command.

    chain -- chain to add the rules to
    filter -- iptables parameters

    Raises PyromanException if the chain has not been declared via add_chain.
    """
    loginfo = Util.get_callee(3)
    # 'not in' instead of the Python-2-only dict.has_key() -- same semantics.
    if chain not in Firewall.chains:
        raise PyromanException("Firewall chain %s not known (use add_chain!) at %s" % (chain, loginfo))
    Firewall.chains[chain].append4(filter, loginfo)
def iptables_end(chain, filter):
    """
    Add an arbitrary iptables (IPv4) command after any statement added
    by the "allow", "drop", "reject", "add_rule" or "iptables" commands.

    chain -- chain to add the rules to
    filter -- iptables parameters

    Raises PyromanException if the chain has not been declared via add_chain.
    """
    loginfo = Util.get_callee(3)
    # 'not in' instead of the Python-2-only dict.has_key() -- same semantics.
    if chain not in Firewall.chains:
        raise PyromanException("Firewall chain %s not known (use add_chain!) at %s" % (chain, loginfo))
    Firewall.chains[chain].append4_end(filter, loginfo)
def ip6tables(chain, filter):
    """
    Add an arbitrary ip6tables (IPv6) command.

    chain -- chain to add the rules to
    filter -- iptables parameters

    Raises PyromanException if the chain has not been declared via add_chain.
    """
    loginfo = Util.get_callee(3)
    # 'not in' instead of the Python-2-only dict.has_key() -- same semantics.
    if chain not in Firewall.chains:
        raise PyromanException("Firewall chain %s not known (use add_chain!) at %s" % (chain, loginfo))
    Firewall.chains[chain].append6(filter, loginfo)
def ip6tables_end(chain, filter):
    """
    Add an arbitrary ip6tables command after any statement added
    by the "allow", "drop", "reject", "add_rule" or "iptables" commands.

    chain -- chain to add the rules to
    filter -- iptables parameters

    Raises PyromanException if the chain has not been declared via add_chain.
    """
    loginfo = Util.get_callee(3)
    if chain not in Firewall.chains:
        raise PyromanException("Firewall chain %s not known (use add_chain!) at %s" % (chain, loginfo))
    # BUG FIX: the original validated the chain but never appended the rule,
    # so ip6tables_end() (and the IPv6 half of ipXtables_end()) silently did
    # nothing. Append it like iptables_end() does for IPv4.
    Firewall.chains[chain].append6_end(filter, loginfo)
def ipXtables(chain, filter):
    """
    Add an arbitrary iptables + ip6tables command (applies to both
    IPv4 and IPv6).

    chain -- chain to add the rules to
    filter -- iptables parameters
    """
    iptables(chain, filter)
    ip6tables(chain, filter)
def ipXtables_end(chain, filter):
    """
    Add an arbitrary iptables + ip6tables command after any statement added
    by the "allow", "drop", "reject", "add_rule" or "iptables" commands.

    chain -- chain to add the rules to
    filter -- iptables parameters
    """
    iptables_end(chain, filter)
    ip6tables_end(chain, filter)
| mit |
ahb0327/intellij-community | python/lib/Lib/distutils/command/build.py | 85 | 4623 | """distutils.command.build
Implements the Distutils 'build' command."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: build.py 37828 2004-11-10 22:23:15Z loewis $"
import sys, os
from distutils.core import Command
from distutils.util import get_platform
def show_compilers ():
    # Print the list of available compiler types; registered below as the
    # callback for the --help-compiler option.
    from distutils.ccompiler import show_compilers
    show_compilers()
class build (Command):
    """Distutils 'build' command: orchestrates build_py, build_clib,
    build_ext and build_scripts via its sub_commands list."""

    description = "build everything needed to install"
    user_options = [
        ('build-base=', 'b',
         "base directory for build library"),
        ('build-purelib=', None,
         "build directory for platform-neutral distributions"),
        ('build-platlib=', None,
         "build directory for platform-specific distributions"),
        ('build-lib=', None,
         "build directory for all distribution (defaults to either " +
         "build-purelib or build-platlib"),
        ('build-scripts=', None,
         "build directory for scripts"),
        ('build-temp=', 't',
         "temporary build directory"),
        ('compiler=', 'c',
         "specify the compiler type"),
        ('debug', 'g',
         "compile extensions and libraries with debugging information"),
        ('force', 'f',
         "forcibly build everything (ignore file timestamps)"),
        ('executable=', 'e',
         "specify final destination interpreter path (build.py)"),
        ]
    boolean_options = ['debug', 'force']
    help_options = [
        ('help-compiler', None,
         "list available compilers", show_compilers),
        ]
    def initialize_options (self):
        # All options start unset; finalize_options() derives the defaults.
        self.build_base = 'build'
        # these are decided only after 'build_base' has its final value
        # (unless overridden by the user or client)
        self.build_purelib = None
        self.build_platlib = None
        self.build_lib = None
        self.build_temp = None
        self.build_scripts = None
        self.compiler = None
        self.debug = None
        self.force = 0
        self.executable = None
    def finalize_options (self):
        # e.g. ".linux-x86_64-2.7" -- distinguishes per-platform build dirs.
        plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3])
        # 'build_purelib' and 'build_platlib' just default to 'lib' and
        # 'lib.<plat>' under the base build directory.  We only use one of
        # them for a given distribution, though --
        if self.build_purelib is None:
            self.build_purelib = os.path.join(self.build_base, 'lib')
        if self.build_platlib is None:
            self.build_platlib = os.path.join(self.build_base,
                                              'lib' + plat_specifier)
        # 'build_lib' is the actual directory that we will use for this
        # particular module distribution -- if user didn't supply it, pick
        # one of 'build_purelib' or 'build_platlib'.
        if self.build_lib is None:
            if self.distribution.ext_modules:
                self.build_lib = self.build_platlib
            else:
                self.build_lib = self.build_purelib
        # 'build_temp' -- temporary directory for compiler turds,
        # "build/temp.<plat>"
        if self.build_temp is None:
            self.build_temp = os.path.join(self.build_base,
                                           'temp' + plat_specifier)
        if self.build_scripts is None:
            self.build_scripts = os.path.join(self.build_base,
                                              'scripts-' + sys.version[0:3])
        if self.executable is None:
            self.executable = os.path.normpath(sys.executable)
    # finalize_options ()
    def run (self):
        # Run all relevant sub-commands.  This will be some subset of:
        #  - build_py      - pure Python modules
        #  - build_clib    - standalone C libraries
        #  - build_ext     - Python extensions
        #  - build_scripts - (Python) scripts
        for cmd_name in self.get_sub_commands():
            self.run_command(cmd_name)
    # -- Predicates for the sub-command list ---------------------------
    def has_pure_modules (self):
        return self.distribution.has_pure_modules()
    def has_c_libraries (self):
        return self.distribution.has_c_libraries()
    def has_ext_modules (self):
        return self.distribution.has_ext_modules()
    def has_scripts (self):
        return self.distribution.has_scripts()
    # Each sub-command runs only when its predicate returns true.
    sub_commands = [('build_py',      has_pure_modules),
                    ('build_clib',    has_c_libraries),
                    ('build_ext',     has_ext_modules),
                    ('build_scripts', has_scripts),
                   ]
# class build
| apache-2.0 |
spencerlyon2/pygments | pygments/lexers/markup.py | 2 | 13479 | # -*- coding: utf-8 -*-
"""
pygments.lexers.markup
~~~~~~~~~~~~~~~~~~~~~~
Lexers for non-HTML markup languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, using, this, \
do_insertions, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic
from pygments.util import get_bool_opt, ClassNotFound
__all__ = ['BBCodeLexer', 'MoinWikiLexer', 'RstLexer', 'TexLexer', 'GroffLexer']
class BBCodeLexer(RegexLexer):
    """
    A lexer that highlights BBCode(-like) syntax.

    .. versionadded:: 0.6
    """

    name = 'BBCode'
    aliases = ['bbcode']
    mimetypes = ['text/x-bbcode']

    tokens = {
        'root': [
            # plain text up to the next '['
            (r'[^[]+', Text),
            # tag/end tag begin
            (r'\[/?\w+', Keyword, 'tag'),
            # stray bracket
            (r'\[', Text),
        ],
        'tag': [
            (r'\s+', Text),
            # attribute with value
            (r'(\w+)(=)("?[^\s"\]]+"?)',
             bygroups(Name.Attribute, Operator, String)),
            # tag argument (a la [color=green])
            (r'(=)("?[^\s"\]]+"?)',
             bygroups(Operator, String)),
            # tag end
            (r'\]', Keyword, '#pop'),
        ],
    }
class MoinWikiLexer(RegexLexer):
    """
    For MoinMoin (and Trac) Wiki markup.

    .. versionadded:: 0.7
    """

    name = 'MoinMoin/Trac Wiki markup'
    aliases = ['trac-wiki', 'moin']
    filenames = []
    mimetypes = ['text/x-trac-wiki']
    flags = re.MULTILINE | re.IGNORECASE

    tokens = {
        'root': [
            (r'^#.*$', Comment),
            (r'(!)(\S+)', bygroups(Keyword, Text)), # Ignore-next
            # Titles
            (r'^(=+)([^=]+)(=+)(\s*#.+)?$',
             bygroups(Generic.Heading, using(this), Generic.Heading, String)),
            # Literal code blocks, with optional shebang
            (r'({{{)(\n#!.+)?', bygroups(Name.Builtin, Name.Namespace), 'codeblock'),
            (r'(\'\'\'?|\|\||`|__|~~|\^|,,|::)', Comment), # Formatting
            # Lists
            (r'^( +)([.*-])( )', bygroups(Text, Name.Builtin, Text)),
            (r'^( +)([a-z]{1,5}\.)( )', bygroups(Text, Name.Builtin, Text)),
            # Other Formatting
            (r'\[\[\w+.*?\]\]', Keyword), # Macro
            (r'(\[[^\s\]]+)(\s+[^\]]+?)?(\])',
             bygroups(Keyword, String, Keyword)), # Link
            (r'^----+$', Keyword), # Horizontal rules
            (r'[^\n\'\[{!_~^,|]+', Text),
            (r'\n', Text),
            (r'.', Text),
        ],
        'codeblock': [
            (r'}}}', Name.Builtin, '#pop'),
            # these blocks are allowed to be nested in Trac, but not MoinMoin
            (r'{{{', Text, '#push'),
            (r'[^{}]+', Comment.Preproc), # slurp boring text
            (r'.', Comment.Preproc), # allow loose { or }
        ],
    }
class RstLexer(RegexLexer):
    """
    For `reStructuredText <http://docutils.sf.net/rst.html>`_ markup.

    .. versionadded:: 0.7

    Additional options accepted:

    `handlecodeblocks`
        Highlight the contents of ``.. sourcecode:: language``,
        ``.. code:: language`` and ``.. code-block:: language``
        directives with a lexer for the given language (default:
        ``True``).

        .. versionadded:: 0.8
    """
    name = 'reStructuredText'
    aliases = ['rst', 'rest', 'restructuredtext']
    filenames = ['*.rst', '*.rest']
    mimetypes = ["text/x-rst", "text/prs.fallenstein.rst"]
    flags = re.MULTILINE

    def _handle_sourcecode(self, match):
        # Callback for sourcecode/code/code-block directives: emit the
        # directive tokens, then delegate the body to the named sub-lexer.
        from pygments.lexers import get_lexer_by_name

        # section header
        yield match.start(1), Punctuation, match.group(1)
        yield match.start(2), Text, match.group(2)
        yield match.start(3), Operator.Word, match.group(3)
        yield match.start(4), Punctuation, match.group(4)
        yield match.start(5), Text, match.group(5)
        yield match.start(6), Keyword, match.group(6)
        yield match.start(7), Text, match.group(7)

        # lookup lexer if wanted and existing
        lexer = None
        if self.handlecodeblocks:
            try:
                lexer = get_lexer_by_name(match.group(6).strip())
            except ClassNotFound:
                pass
        indention = match.group(8)
        indention_size = len(indention)
        code = (indention + match.group(9) + match.group(10) + match.group(11))

        # no lexer for this language. handle it like it was a code block
        if lexer is None:
            yield match.start(8), String, code
            return

        # highlight the lines with the lexer.
        # strip the common indentation off each line first, re-inserting it
        # as plain Text tokens via do_insertions.
        ins = []
        codelines = code.splitlines(True)
        code = ''
        for line in codelines:
            if len(line) > indention_size:
                ins.append((len(code), [(0, Text, line[:indention_size])]))
                code += line[indention_size:]
            else:
                code += line
        for item in do_insertions(ins, lexer.get_tokens_unprocessed(code)):
            yield item

    # from docutils.parsers.rst.states
    closers = u'\'")]}>\u2019\u201d\xbb!?'
    unicode_delimiters = u'\u2010\u2011\u2012\u2013\u2014\u00a0'
    end_string_suffix = (r'((?=$)|(?=[-/:.,; \n\x00%s%s]))'
                         % (re.escape(unicode_delimiters),
                            re.escape(closers)))

    tokens = {
        'root': [
            # Heading with overline
            (r'^(=+|-+|`+|:+|\.+|\'+|"+|~+|\^+|_+|\*+|\++|#+)([ \t]*\n)'
             r'(.+)(\n)(\1)(\n)',
             bygroups(Generic.Heading, Text, Generic.Heading,
                      Text, Generic.Heading, Text)),
            # Plain heading
            (r'^(\S.*)(\n)(={3,}|-{3,}|`{3,}|:{3,}|\.{3,}|\'{3,}|"{3,}|'
             r'~{3,}|\^{3,}|_{3,}|\*{3,}|\+{3,}|#{3,})(\n)',
             bygroups(Generic.Heading, Text, Generic.Heading, Text)),
            # Bulleted lists
            (r'^(\s*)([-*+])( .+\n(?:\1  .+\n)*)',
             bygroups(Text, Number, using(this, state='inline'))),
            # Numbered lists
            (r'^(\s*)([0-9#ivxlcmIVXLCM]+\.)( .+\n(?:\1  .+\n)*)',
             bygroups(Text, Number, using(this, state='inline'))),
            (r'^(\s*)(\(?[0-9#ivxlcmIVXLCM]+\))( .+\n(?:\1  .+\n)*)',
             bygroups(Text, Number, using(this, state='inline'))),
            # Numbered, but keep words at BOL from becoming lists
            (r'^(\s*)([A-Z]+\.)( .+\n(?:\1  .+\n)+)',
             bygroups(Text, Number, using(this, state='inline'))),
            (r'^(\s*)(\(?[A-Za-z]+\))( .+\n(?:\1  .+\n)+)',
             bygroups(Text, Number, using(this, state='inline'))),
            # Line blocks
            (r'^(\s*)(\|)( .+\n(?:\|  .+\n)*)',
             bygroups(Text, Operator, using(this, state='inline'))),
            # Sourcecode directives
            (r'^( *\.\.)(\s*)((?:source)?code(?:-block)?)(::)([ \t]*)([^\n]+)'
             r'(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\8.*|)\n)+)',
             _handle_sourcecode),
            # A directive
            (r'^( *\.\.)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
             bygroups(Punctuation, Text, Operator.Word, Punctuation, Text,
                      using(this, state='inline'))),
            # A reference target
            (r'^( *\.\.)(\s*)(_(?:[^:\\]|\\.)+:)(.*?)$',
             bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
            # A footnote/citation target
            (r'^( *\.\.)(\s*)(\[.+\])(.*?)$',
             bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
            # A substitution def
            (r'^( *\.\.)(\s*)(\|.+\|)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
             bygroups(Punctuation, Text, Name.Tag, Text, Operator.Word,
                      Punctuation, Text, using(this, state='inline'))),
            # Comments
            (r'^ *\.\..*(\n( +.*\n|\n)+)?', Comment.Preproc),
            # Field list
            (r'^( *)(:[a-zA-Z-]+:)(\s*)$', bygroups(Text, Name.Class, Text)),
            (r'^( *)(:.*?:)([ \t]+)(.*?)$',
             bygroups(Text, Name.Class, Text, Name.Function)),
            # Definition list
            (r'^(\S.*(?<!::)\n)((?:(?: +.*)\n)+)',
             bygroups(using(this, state='inline'), using(this, state='inline'))),
            # Code blocks
            (r'(::)(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\3.*|)\n)+)',
             bygroups(String.Escape, Text, String, String, Text, String)),
            include('inline'),
        ],
        'inline': [
            (r'\\.', Text), # escape
            (r'``', String, 'literal'), # code
            (r'(`.+?)(<.+?>)(`__?)', # reference with inline target
             bygroups(String, String.Interpol, String)),
            (r'`.+?`__?', String), # reference
            (r'(`.+?`)(:[a-zA-Z0-9:-]+?:)?',
             bygroups(Name.Variable, Name.Attribute)), # role
            (r'(:[a-zA-Z0-9:-]+?:)(`.+?`)',
             bygroups(Name.Attribute, Name.Variable)), # role (content first)
            (r'\*\*.+?\*\*', Generic.Strong), # Strong emphasis
            (r'\*.+?\*', Generic.Emph), # Emphasis
            (r'\[.*?\]_', String), # Footnote or citation
            (r'<.+?>', Name.Tag), # Hyperlink
            (r'[^\\\n\[*`:]+', Text),
            (r'.', Text),
        ],
        'literal': [
            (r'[^`]+', String),
            (r'``' + end_string_suffix, String, '#pop'),
            (r'`', String),
        ]
    }

    def __init__(self, **options):
        self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True)
        RegexLexer.__init__(self, **options)

    def analyse_text(text):
        # Heuristic scoring: directive marker at start, or a heading whose
        # underline exactly matches the first line's length.
        if text[:2] == '..' and text[2:3] != '.':
            return 0.3
        p1 = text.find("\n")
        p2 = text.find("\n", p1 + 1)
        if (p2 > -1 and              # has two lines
            p1 * 2 + 1 == p2 and     # they are the same length
            text[p1+1] in '-=' and   # the next line both starts and ends with
            text[p1+1] == text[p2-1]): # ...a sufficiently high header
            return 0.5
class TexLexer(RegexLexer):
    """
    Lexer for the TeX and LaTeX typesetting languages.
    """
    name = 'TeX'
    aliases = ['tex', 'latex']
    filenames = ['*.tex', '*.aux', '*.toc']
    mimetypes = ['text/x-tex', 'text/x-latex']
    # NOTE: rule order within each state is significant -- RegexLexer tries
    # patterns in order, so do not reorder entries.
    tokens = {
        # Rules shared by text mode and math mode: comments, braces and the
        # special characters &, _, ^.
        'general': [
            (r'%.*?\n', Comment),
            (r'[{}]', Name.Builtin),
            (r'[&_^]', Name.Builtin),
        ],
        'root': [
            # Math-mode delimiters push the matching math state.
            (r'\\\[', String.Backtick, 'displaymath'),
            (r'\\\(', String, 'inlinemath'),
            (r'\$\$', String.Backtick, 'displaymath'),
            (r'\$', String, 'inlinemath'),
            # A control sequence: backslash followed by a name or a single
            # non-letter character; 'command' handles its optional suffixes.
            (r'\\([a-zA-Z]+|.)', Keyword, 'command'),
            include('general'),
            (r'[^\\$%&_^{}]+', Text),
        ],
        'math': [
            (r'\\([a-zA-Z]+|.)', Name.Variable),
            include('general'),
            (r'[0-9]+', Number),
            (r'[-=!+*/()\[\]]', Operator),
            (r'[^=!+*/()\[\]\\$%&_^{}0-9-]+', Name.Builtin),
        ],
        'inlinemath': [
            # Closed by \) or a single $.
            (r'\\\)', String, '#pop'),
            (r'\$', String, '#pop'),
            include('math'),
        ],
        'displaymath': [
            # Closed by \] or $$; a lone $ inside display math is literal.
            (r'\\\]', String, '#pop'),
            (r'\$\$', String, '#pop'),
            (r'\$', Name.Builtin),
            include('math'),
        ],
        'command': [
            # After a control sequence: optional [...] argument and star
            # variant; anything else returns to the calling state.
            (r'\[.*?\]', Name.Attribute),
            (r'\*', Keyword),
            default('#pop'),
        ],
    }
    def analyse_text(text):
        # Returns True when the document opens with a well-known (La)TeX
        # preamble command; otherwise falls through returning None
        # ("no opinion").
        for start in ("\\documentclass", "\\input", "\\documentstyle",
                      "\\relax"):
            if text[:len(start)] == start:
                return True
class GroffLexer(RegexLexer):
    """
    Lexer for the (g)roff typesetting language, supporting groff
    extensions. Mainly useful for highlighting manpage sources.

    .. versionadded:: 0.6
    """
    name = 'Groff'
    aliases = ['groff', 'nroff', 'man']
    filenames = ['*.[1234567]', '*.man']
    mimetypes = ['application/x-troff', 'text/troff']
    # NOTE: rule order within each state is significant for RegexLexer.
    tokens = {
        'root': [
            # A request line: dot plus request name, then 'request' state
            # for its arguments.
            (r'(\.)(\w+)', bygroups(Text, Keyword), 'request'),
            (r'\.', Punctuation, 'request'),
            # Regular characters, slurp till we find a backslash or newline
            (r'[^\\\n]*', Text, 'textline'),
        ],
        'textline': [
            include('escapes'),
            (r'[^\\\n]+', Text),
            (r'\n', Text, '#pop'),
        ],
        'escapes': [
            # groff has many ways to write escapes.
            (r'\\"[^\n]*', Comment),
            (r'\\[fn]\w', String.Escape),
            (r'\\\(.{2}', String.Escape),
            (r'\\.\[.*\]', String.Escape),
            (r'\\.', String.Escape),
            # A line continuation keeps us in request mode.
            (r'\\\n', Text, 'request'),
        ],
        'request': [
            (r'\n', Text, '#pop'),
            include('escapes'),
            (r'"[^\n"]+"', String.Double),
            (r'\d+', Number),
            (r'\S+', String),
            (r'\s+', Text),
        ],
    }

    def analyse_text(text):
        # Guess whether *text* is roff source: it must start with a control
        # line.  '.\\"' (a comment) and '.TH ' (a manpage header) are
        # certain; otherwise a short alphanumeric request name followed by
        # whitespace is a strong hint.
        if text[:1] != '.':
            return False
        if text[:3] == '.\\"':
            return True
        if text[:4] == '.TH ':
            return True
        # Bug fix: slice instead of text[3] so inputs shorter than four
        # characters (e.g. ".ab") no longer raise IndexError -- an empty
        # slice's .isspace() is simply False.
        if text[1:3].isalnum() and text[3:4].isspace():
            return 0.9
| bsd-2-clause |
RedHatQE/cfme_tests | cfme/tests/automate/test_automate_manual.py | 1 | 25970 | """Manual tests"""
import pytest
from cfme import test_requirements
pytestmark = [test_requirements.automate, pytest.mark.manual]
@pytest.mark.tier(1)
def test_customize_request_security_group():
"""
Polarion:
assignee: ghubale
initialEstimate: 1/4h
caseimportance: medium
caseposneg: positive
testtype: functional
startsin: 5.6
casecomponent: Automate
tags: automate
title: Test customize request security group
testSteps:
1. Copy the "customize request" method to a writable domain and modify the mapping
setting from mapping = 0 to mapping = 1.
2. Create a REST API call to provision an Amazon or OpenStack instance and pass the
"security_group" value with the name in "additional_values" that you want to apply.
3. Check the request that was created and verify that the security group was not applied
expectedResults:
1.
2.
3. Specified security group gets set.
Bugzilla:
1335989
"""
pass
@pytest.mark.tier(1)
def test_automate_generic_object_service_associations():
"""
Polarion:
assignee: ghubale
initialEstimate: 1/10h
caseimportance: medium
caseposneg: positive
testtype: functional
startsin: 5.7
casecomponent: Automate
tags: automate
title: Test automate generic object service associations
testSteps:
1. Use the attached domain to test this bug:
2. Import end enable the domain
3. Have at least one service created (Generic is enough)
4. Run rails console and create the object definition:
GenericObjectDefinition.create(:name => "LoadBalancer", :properties => {
:attributes => {:location => "string"}, :associations => {:vms => "Vm",
:services => "Service"},})
5. Run tail -fn0 log/automation.log | egrep "ERROR|XYZ"
expectedResults:
1.
2.
3.
4.
5.
6. Simulate Request/GOTest with method execution
In the tail"ed log:
There should be no ERROR lines related to the execution.
There should be these two lines:
<AEMethod gotest> XYZ go object: #<MiqAeServiceGenericObject....something...>
<AEMethod gotest> XYZ load balancer got service:
#<MiqAeServiceService:....something....>
If there is "XYZ load balancer got service: nil", then this bug was reproduced.
Bugzilla:
1410920
"""
pass
@pytest.mark.tier(2)
def test_automate_git_domain_displayed_in_dialog():
"""
Check that the domain imported from git is displayed and usable in the
pop-up tree in the dialog editor.
You can use eg. https://github.com/ramrexx/CloudForms_Essentials.git
for that
Polarion:
assignee: ghubale
initialEstimate: 1/15h
caseimportance: medium
caseposneg: positive
testtype: functional
startsin: 5.7
casecomponent: Automate
tags: automate
title: Test automate git domain displayed in dialog
testSteps:
1. Import domain given in step 2
2. You can use eg. https://github.com/ramrexx/CloudForms_Essentials.git
expectedResults:
1.
2. Check that the domain imported from git is displayed and usable in the pop-up tree
in the dialog editor.
"""
pass
@pytest.mark.tier(1)
def test_automate_engine_database_connection():
"""
All steps in: https://bugzilla.redhat.com/show_bug.cgi?id=1334909
Polarion:
assignee: ghubale
initialEstimate: 1/8h
caseimportance: medium
caseposneg: positive
testtype: functional
startsin: 5.7
casecomponent: Automate
tags: automate
title: Test automate engine database connection
testSteps:
1. Create a 'visibility' tag category, containing a single tag
2. Run the attached script via the RESTful API to duplicate the tags in the category
3. Observe the error
expectedResults:
1.
2.
3. No error
Bugzilla:
1334909
"""
pass
@pytest.mark.tier(3)
def test_automate_check_quota_regression():
    """
    Update from 5.8.2 to 5.8.3 has broken custom automate method. Error
    is thrown for the check_quota instance method for an undefined method
    provisioned_storage.
    Polarion:
        assignee: ghubale
        casecomponent: Automate
        caseimportance: medium
        initialEstimate: 1/6h
        tags: automate
        testSteps:
            1. You'll need to create an invalid VM provisioning request to reproduce this issue.
            2. The starting point is an appliance with a provider configured, that can successfully
            provision a VM using lifecycle provisioning.
            3. Add a second provider to use for VM lifecycle provisioning.
            4. Add a 2nd zone called "test_zone". (Don't add a second appliance for this zone)
            5. Set the zone of the second provider to be "test_zone".
            6. Provision a VM for the second provider, using VM lifecycle provisioning.
            (The provisioning request should remain in pending/active status and should not get
            processed because there is no appliance/workers for the "test_zone".)
            7. Delete the template used in step
            8. Through the UI when you navigate to virtual machines, templates is on the left nav
            bar, select the template used in step 4 and select: "Remove from Inventory"
            9. Provisioning a VM for the first provider, using VM lifecycle provisioning should
            produce the reported error.
        expectedResults:
            1.
            2.
            3.
            4.
            5.
            6.
            7.
            8.
            9. No error
    Bugzilla:
        1554989
    """
    pass
@pytest.mark.tier(3)
def test_automate_git_domain_import_with_no_connection():
"""
Polarion:
assignee: ghubale
casecomponent: Automate
caseimportance: medium
initialEstimate: 1/6h
tags: automate
startsin: 5.7
testSteps:
1. Import a Git Domain into Automate
2. Server the connection to the GIT Server from the appliance
(Disable VPN or some other trick)
3. List all the Automate Domains using Automate-> Explorer
expectedResults:
1.
2.
3. The domain should be displayed properly
Bugzilla:
1391208
"""
pass
@pytest.mark.tier(1)
def test_automate_retry_onexit_increases():
"""
Polarion:
assignee: ghubale
casecomponent: Automate
caseimportance: medium
initialEstimate: 1/8h
tags: automate
testSteps:
1. Import the attached file, it will create a domain called OnExitRetry
2. Enable the domain
3. Go to Automate / Simulation
4. Simulate Request with instance OnExitRetry, execute methods
5. Click submit, open the tree on right and expand ae_state_retries
expectedResults:
1.
2.
3.
4.
5. It should be 1 by now and subsequent clicks on Retry should raise the
number if it works properly.
Bugzilla:
1365442
"""
pass
@pytest.mark.tier(3)
def test_automate_simulation_result_has_hash_data():
"""
The UI should display the result objects if the Simulation Result has
hash data.
Polarion:
assignee: ghubale
casecomponent: Automate
caseimportance: medium
initialEstimate: 1/6h
tags: automate
testSteps:
1. Create a Instance under /System/Request called ListUser, update it so that it points
to a ListUser Method
2. Create ListUser Method under /System/Request, paste the Attached Method
3. Run Simulation
expectedResults:
1.
2.
3. The UI should display the result objects
Bugzilla:
1445089
"""
pass
@pytest.mark.tier(3)
def test_automate_git_import_without_master():
"""
Git repository doesn't have to have master branch
Polarion:
assignee: ghubale
casecomponent: Automate
caseimportance: medium
initialEstimate: 1/12h
tags: automate
testSteps:
1. Create git repository with different default branch than master.
2. Add some valid code, for example exported one.
3. Navigate to Automation -> Automate -> Import/Export
4. Enter credentials and hit the submit button.
expectedResults:
1.
2.
3.
4. Domain was imported from git
Bugzilla:
1508881
"""
pass
@pytest.mark.tier(1)
def test_state_machine_variable():
"""
Test whether storing the state machine variable works and the value is
available in another state.
Polarion:
assignee: ghubale
casecomponent: Automate
caseimportance: medium
initialEstimate: 1/4h
tags: automate
testSteps:
1. Test whether storing the state machine variable works and the value is available in
another state.
"""
pass
@pytest.mark.tier(2)
def test_automate_method_copy():
"""
Should copy selected automate method/Instance without going into edit mode.
Polarion:
assignee: ghubale
casecomponent: Automate
caseimportance: medium
initialEstimate: 1/8h
tags: automate
startsin: 5.9
upstream: yes
testSteps:
1. Add new domain (In enabled/unlock mode)
2. Add namespace in that domain
3. Add class in that namespace
4. Unlock ManageIQ domain now
5. Select Instance/Method from any class in ManageIQ
6. From configuration toolbar, select "Copy this method/Instance"
expectedResults:
1.
2.
3.
4. Able to copy method with "Copy This Method" toolbar.
Bugzilla:
1500956
"""
pass
@pytest.mark.tier(3)
def test_automate_git_import_deleted_tag():
"""
Polarion:
assignee: ghubale
casecomponent: Automate
caseimportance: medium
initialEstimate: 1/12h
tags: automate
startsin: 5.7
testSteps:
1. Create a github-hosted repository containing a correctly formatted automate domain.
This repository should contain two or more tagged commits.
2. Import the git-hosted domain into automate. Note that the tags are visible to select
from in the import dialog
3. Delete the most recent tagged commit and tag from the source github repository
4. In automate explorer, click on the domain and click Configuration -> Refresh with a
new branch or tag
5. Observe the list of available tags to import from
expectedResults:
1.
2.
3.
4.
5. The deleted tag should no longer be visible in the list of tags to refresh from
Bugzilla:
1394194
"""
pass
@pytest.mark.tier(1)
def test_automate_service_quota_runs_only_once():
"""
Polarion:
assignee: ghubale
casecomponent: Automate
caseimportance: medium
initialEstimate: 1/4h
tags: automate
testSteps:
1. Provision a service.
2. Check the automation.log to see both quota checks, one for
ServiceTemplateProvisionRequest_created,
and ServiceTemplateProvisionRequest_starting.
expectedResults:
1.
2. Quota executed once.
Bugzilla:
1317698
"""
pass
@pytest.mark.tier(1)
def test_automate_state_method():
"""
You can pass methods as states compared to the old method of passing
instances which had to be located in different classes. You use the
METHOD:: prefix
Polarion:
assignee: ghubale
casecomponent: Automate
caseimportance: medium
initialEstimate: 1/4h
tags: automate
startsin: 5.6
setup: A fresh appliance.
testSteps:
1. Create an automate class that has one state.
2. Create a method in the class, make the method output
something recognizable in the logs
3. Create an instance inside the class, and as a Value for the
state use: METHOD::method_name where method_name is the name
of the method you created
4. Run a simulation, use Request / Call_Instance to call your
state machine instance
expectedResults:
1. Class created
2. Method created
3. Instance created
4. The method got called, detectable by grepping logs
"""
pass
@pytest.mark.tier(2)
def test_button_can_trigger_events():
"""
In the button creation dialog there must be MiqEvent available for
System/Process entry.
Polarion:
assignee: ghubale
casecomponent: Automate
caseimportance: medium
initialEstimate: 1/60h
tags: automate
startsin: 5.6.1
testSteps:
1. Go to automate and copy the Class /ManageIQ/System/Process to your custom domain
2. Create an instance named
MiqEvent with a rel5 of : /System/Event/MiqEvent/Policy/${/#event_type}
3. On the custom button provide the following details.
* System/Process/ MiqEvent
* Message create
* Request vm_retire_warn
* Attribute
* event_type vm_retire_warn
expectedResults:
1.
2.
3. The MiqEntry is present and triggering an event should work
Bugzilla:
1348605
"""
pass
@pytest.mark.tier(3)
def test_automate_requests_tab_exposed():
"""
Need to expose Automate => Requests tab from the Web UI without
exposing any other Automate tabs (i.e. Explorer, Customization,
Import/Export, Logs). The only way to expose this in the Web UI, is to
enable Services => Requests, and at least one tab from the Automate
section (i.e. Explorer, Customization, etc).
Polarion:
assignee: ghubale
casecomponent: Automate
caseimportance: medium
initialEstimate: 1/12h
tags: automate
startsin: 5.10
testSteps:
1. Test this with the role EvmRole-support
2. By default this role does not have access to the Automation tab in the Web UI.
3. Copy this role to AA-EVMRole-support and add all of the Automate role features.
4. Did not allow user to see Requests under Automate.
5. Enabled all the Service => Request role features.
6. This allows user to see the Automate => Requests.
expectedResults:
1.
2.
3.
4.
5.
6. "Automate/Requests" tab can be exposed for a role without exposing "Service/Requests"
tab
Bugzilla:
1508490
"""
pass
@pytest.mark.tier(3)
def test_automate_git_credentials_changed():
"""
Polarion:
assignee: ghubale
casecomponent: Automate
caseimportance: medium
initialEstimate: 1/6h
tags: automate
testSteps:
1. Customer is using a private enterprise git repo.
2. The original username was changed and upon a refresh, the customer noticed
it did not update
3. There was no message letting the user know there was a validation error
expectedResults:
1.
2.
3. There were no FATAL messages in the log if the credentials were changed
Bugzilla:
1552274
"""
pass
@pytest.mark.tier(1)
def test_automate_git_import_case_insensitive():
"""
bin/rake evm:automate:import PREVIEW=false
GIT_URL=https://github.com/mkanoor/SimpleDomain REF=test2branch
This should not cause an error (the actual name of the branch is
Test2Branch).
Polarion:
assignee: ghubale
casecomponent: Automate
caseimportance: medium
initialEstimate: 1/8h
tags: automate
startsin: 5.7
"""
pass
@pytest.mark.tier(1)
def test_assert_failed_substitution():
    """
    Polarion:
        assignee: ghubale
        casecomponent: Automate
        caseimportance: medium
        initialEstimate: 1/4h
        tags: automate
    Bugzilla:
        1335669
    """
    pass
@pytest.mark.tier(3)
def test_automate_import_namespace_attributes_updated():
"""
Polarion:
assignee: ghubale
casecomponent: Automate
caseimportance: low
initialEstimate: 1/12h
tags: automate
testSteps:
1. Export an Automate model
2. Change the display name and description in the exported namespace yaml file
3. Run an import with the updated data
4. Check if the namespace attributes get updated.Display name and description attributes
should get updated
Bugzilla:
1440226
"""
pass
@pytest.mark.tier(3)
def test_automate_user_has_groups():
    """
    https://bugzilla.redhat.com/show_bug.cgi?id=1411424
    This method should work: groups = $evm.vmdb(:user).first.miq_groups
    $evm.log(:info, "Displaying the user's groups: #{groups.inspect}")
    Polarion:
        assignee: ghubale
        casecomponent: Automate
        caseimportance: medium
        initialEstimate: 1/12h
        tags: automate
        startsin: 5.8
    Bugzilla:
        1411424
    """
    pass
@pytest.mark.tier(3)
def test_automate_quota_units():
"""
Polarion:
assignee: ghubale
casecomponent: Automate
caseimportance: low
initialEstimate: 1/4h
tags: automate
Bugzilla:
1334318
"""
pass
@pytest.mark.tier(3)
def test_automate_restrict_domain_crud():
"""
When you create a role that can only view automate domains, it can
view automate domains, it cannot manipulate the domains themselves,
but can CRUD on namespaces, classes, instances ....
Polarion:
assignee: ghubale
casecomponent: Automate
caseimportance: medium
initialEstimate: 1/6h
tags: automate
Bugzilla:
1365493
"""
pass
@pytest.mark.tier(3)
def test_automate_embedded_method():
"""
For a "new" method when adding Embedded Methods the UI hangs in the
tree view when the method is selected
Polarion:
assignee: ghubale
casecomponent: Automate
caseimportance: high
initialEstimate: 1/12h
tags: automate
Bugzilla:
1523379
"""
pass
@pytest.mark.tier(3)
def test_automate_git_verify_ssl():
"""
Polarion:
assignee: ghubale
casecomponent: Automate
caseimportance: low
initialEstimate: 1/12h
tags: automate
startsin: 5.7
Bugzilla:
1470738
"""
pass
@pytest.mark.tier(1)
def test_automate_buttons_requests():
"""
Polarion:
assignee: ghubale
casecomponent: Automate
caseimportance: low
initialEstimate: 1/18h
tags: automate
testSteps:
1. Navigate to Automate -> Requests
2. Check whether these buttons are displayed: Reload, Apply , Reset, Default
"""
pass
@pytest.mark.tier(1)
def test_check_system_request_calls_depr_configurationmanagement():
"""
Polarion:
assignee: ghubale
initialEstimate: 1/8h
caseimportance: low
caseposneg: positive
testtype: functional
startsin: 5.10
casecomponent: Automate
tags: automate
testSteps:
1. Copy /System/Request/ansible_tower_job instance to new domain
2. Run that instance using simulation
3. See automation log
Bugzilla:
1615444
"""
pass
@pytest.mark.tier(1)
def test_list_of_diff_vm_storages_via_rails():
"""
Polarion:
assignee: ghubale
initialEstimate: 1/8h
caseimportance: medium
caseposneg: positive
testtype: functional
startsin: 5.9
casecomponent: Automate
tags: automate
testSteps:
1. vmware = $evm.vmdb('ems').find_by_name('vmware 6.5 (nested)') ;
2. vm = vmware.vms.select { |v| v.name == 'ghubale-cfme510' }.first ;
3. vm.storage
4. vm.storages
expectedResults:
1.
2.
3. Returns only one storage
4. Returns available storages
Bugzilla:
1574444
"""
pass
@pytest.mark.tier(1)
def test_method_for_log_and_notify():
"""
Polarion:
assignee: ghubale
initialEstimate: 1/8h
caseimportance: high
caseposneg: positive
testtype: functional
startsin: 5.9
casecomponent: Automate
tags: automate
testSteps:
1. Create an Automate domain or use an existing writeable Automate domain
2. Create a new Automate Method
3. In the Automate Method screen embed ManageIQ/System/CommonMethods/Utils/log_object
you can pick this
method from the UI tree picker
4. In your method add a line akin to
ManageIQ::Automate::System::CommonMethods::Utils::LogObject.log_and_notify
(:info, "Hello Testing Log & Notify", $evm.root['vm'], $evm)
5. Check the logs
6. In your UI session you should see a notification
PR:
https://github.com/ManageIQ/manageiq-content/pull/423
"""
pass
@pytest.mark.tier(1)
def test_miq_stop_abort_with_state_machines():
"""
Polarion:
assignee: ghubale
initialEstimate: 1/8h
caseimportance: high
caseposneg: positive
testtype: functional
startsin: 5.9
casecomponent: Automate
tags: automate
Bugzilla:
1441353
"""
pass
@pytest.mark.tier(3)
def test_user_requester_for_lifecycle_provision():
"""
Polarion:
assignee: ghubale
casecomponent: Automate
caseimportance: high
initialEstimate: 1/6h
tags: automate
testSteps:
1. Tested a series of service provisions and default lifecycle provisions with different
users (user1 only did default lifecycle provision,
user2 only did generic service provision or provision different service catalog items
with different users)
2. Added logging to the validate_request method for both Service and Infrastructure
namespaces
3. Viewed the automation.log
expectedResults:
1.
2.
3. The same user is shown in the automation.log as in the request
Bugzilla:
1671563
"""
pass
@pytest.mark.tier(1)
@pytest.mark.ignore_stream("5.10")
def test_remove_openshift_deployment_in_automate():
"""This test case will test successful removal of OpenShift Deployment removed from Automate
Polarion:
assignee: ghubale
initialEstimate: 1/8h
caseimportance: high
caseposneg: positive
testtype: functional
startsin: 5.11
casecomponent: Automate
tags: automate
Bugzilla:
1672937
"""
pass
@pytest.mark.tier(1)
def test_vm_naming_number_padding():
"""
Polarion:
assignee: ghubale
initialEstimate: 1/8h
caseimportance: high
caseposneg: positive
testtype: functional
startsin: 5.10
casecomponent: Automate
tags: automate
setup:
1. Add any provider
testSteps:
1. Provision more than 10 VMs
expectedResults:
1. VMs should be generated with respective numbering
Bugzilla:
1688672
"""
pass
@pytest.mark.tier(1)
@pytest.mark.ignore_stream("5.10")
def test_vm_name_automate_method():
"""This test case will check redesign of vm_name automated method
Polarion:
assignee: ghubale
initialEstimate: 1/8h
caseimportance: high
caseposneg: positive
testtype: functional
startsin: 5.11
casecomponent: Automate
tags: automate
Bugzilla:
1677573
"""
pass
@pytest.mark.tier(1)
def test_null_coalescing_fields():
"""
Polarion:
assignee: ghubale
initialEstimate: 1/8h
caseimportance: high
caseposneg: positive
testtype: functional
startsin: 5.10
casecomponent: Automate
tags: automate
testSteps:
1. Create a Ruby method or Ansible playbook method with Input Parameters.
2. Use Data Type null coalescing
3. Make the default value something like this : ${#var3} || ${#var2} || ${#var1}
expectedResults:
1.
2.
3. Normal null coalescing behavior
Bugzilla:
1698184
"""
| gpl-2.0 |
dcroc16/skunk_works | google_appengine/lib/django-1.4/django/utils/simplejson/__init__.py | 80 | 14504 | r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> import decimal
>>> json.loads('1.1', parse_float=decimal.Decimal) == decimal.Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... raise TypeError("%r is not JSON serializable" % (o,))
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -msimplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -msimplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
# Django modification: try to use the system version first, providing it's
# either a later version or has the C speedups in place. Otherwise, fall
# back to our local copy.

__version__ = '2.0.7'

use_system_version = False


def _parse_version(version):
    """Return a dotted version string as a tuple of ints for comparison.

    Comparing the raw ``version.split('.')`` lists compares strings, which
    breaks as soon as a component reaches two digits ('10' sorts before
    '7' lexicographically).  Non-numeric components (e.g. '0rc1')
    conservatively compare as 0.
    """
    parts = []
    for part in version.split('.'):
        try:
            parts.append(int(part))
        except ValueError:
            parts.append(0)
    return tuple(parts)


try:
    # The system-installed version has priority providing it is either not
    # an earlier version or it contains the C speedups.
    import simplejson
    if (_parse_version(simplejson.__version__) >= _parse_version(__version__)
            or hasattr(simplejson, '_speedups')):
        from simplejson import *
        use_system_version = True
        # Make sure we copy over the version. See #17071
        __version__ = simplejson.__version__
except ImportError:
    pass

if not use_system_version:
    try:
        from json import *  # Python 2.6 preferred over local copy.
        # There is a "json" package around that is not Python's "json", so we
        # check for something that is only in the namespace of the version we
        # want.
        JSONDecoder
        use_system_version = True
        # Make sure we copy over the version. See #17071.
        # ``from json import *`` does not bind the module name ``json``
        # itself, so import it explicitly before reading ``__version__``
        # (previously this line raised a NameError that was silently
        # swallowed below, leaving ``__version__`` stale).
        import json
        __version__ = json.__version__
    except (ImportError, NameError):
        pass
# If all else fails, we have a bundled version that can be used.
if not use_system_version:
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONEncoder',
]
from django.utils.simplejson.decoder import JSONDecoder
from django.utils.simplejson.encoder import JSONEncoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is ``False``, then the some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and object
members will be pretty-printed with that indent level. An indent level
of 0 will only insert newlines. ``None`` is the most compact representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, **kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is ``False``, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
**kw).encode(obj)
_default_decoder = JSONDecoder(encoding=None, object_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, **kw):
    """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
    a JSON document) to a Python object.

    If the contents of ``fp`` is encoded with an ASCII based encoding other
    than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must
    be specified. Encodings that are not ASCII based (such as UCS-2) are
    not allowed, and should be wrapped with
    ``codecs.getreader(fp)(encoding)``, or simply decoded to a ``unicode``
    object and passed to ``loads()``.

    ``object_hook`` is an optional function that will be called with the
    result of any object literal decode (a ``dict``). The return value of
    ``object_hook`` will be used instead of the ``dict``. This feature
    can be used to implement custom decoders (e.g. JSON-RPC class hinting).

    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg.
    """
    # Slurp the whole document and hand everything off to loads(), which
    # holds the actual decoding logic.
    document = fp.read()
    return loads(document,
                 encoding=encoding, cls=cls, object_hook=object_hook,
                 parse_float=parse_float, parse_int=parse_int,
                 parse_constant=parse_constant, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, **kw):
    """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
    document) to a Python object.

    If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
    other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
    must be specified. Encodings that are not ASCII based (such as UCS-2)
    are not allowed and should be decoded to ``unicode`` first.

    ``object_hook`` is an optional function that will be called with the
    result of any object literal decode (a ``dict``); its return value is
    used instead of the ``dict`` (e.g. for JSON-RPC class hinting).

    ``parse_float`` / ``parse_int`` are called with the string of every JSON
    float / int to be decoded (defaults are equivalent to ``float(num_str)``
    and ``int(num_str)``); use them to substitute another datatype or parser
    (e.g. ``decimal.Decimal``).

    ``parse_constant``, if specified, will be called with one of the
    following strings: -Infinity, Infinity, NaN, null, true, false; it can
    be used to raise on invalid JSON numbers.

    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg.
    """
    # Fast path: nothing customized at all -- reuse the cached module-level
    # decoder instead of building a new one.
    if (cls is None and encoding is None and object_hook is None and
        parse_int is None and parse_float is None and
        parse_constant is None and not kw):
        return _default_decoder.decode(s)
    if cls is None:
        cls = JSONDecoder
    # Fold the non-None hook arguments into kw so they reach the decoder.
    for name, hook in (('object_hook', object_hook),
                       ('parse_float', parse_float),
                       ('parse_int', parse_int),
                       ('parse_constant', parse_constant)):
        if hook is not None:
            kw[name] = hook
    return cls(encoding=encoding, **kw).decode(s)
| mit |
xsixing/blaze | blaze/tests/test_array_creation.py | 1 | 6519 | from __future__ import absolute_import, division, print_function
import unittest
import numpy as np
import datashape
import blaze
from blaze.datadescriptor import dd_as_py
from blaze.tests.common import MayBeUriTest
from blaze import append
from blaze.py2help import skip
class TestEphemeral(unittest.TestCase):
    """Tests for in-memory (non-persistent) blaze array construction."""

    def test_create_scalar(self):
        # Scalars of each basic kind map onto the expected datashape.
        a = blaze.array(True)
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertEqual(a.dshape, datashape.dshape('bool'))
        self.assertEqual(bool(a), True)
        a = blaze.array(-123456)
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertEqual(a.dshape, datashape.dshape('int32'))
        self.assertEqual(int(a), -123456)
        a = blaze.array(-1.25e-10)
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertEqual(a.dshape, datashape.dshape('float64'))
        self.assertEqual(float(a), -1.25e-10)
        a = blaze.array(-1.25e-10+2.5j)
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertEqual(a.dshape, datashape.dshape('complex[float64]'))
        self.assertEqual(complex(a), -1.25e-10+2.5j)

    def test_create_from_numpy(self):
        # NumPy arrays are accepted directly as the data source.
        a = blaze.array(np.arange(3))
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertEqual(dd_as_py(a._data), [0, 1, 2])

    def test_create(self):
        # A default array (backed by NumPy)
        a = blaze.array([1,2,3])
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertEqual(dd_as_py(a._data), [1, 2, 3])

    def test_create_append(self):
        # A default array (backed by NumPy, append not supported yet)
        a = blaze.array([])
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertRaises(ValueError, append, a, [1,2,3])
        # XXX The tests below still do not work
        # self.assertEqual(a[0], 1)
        # self.assertEqual(a[1], 2)
        # self.assertEqual(a[2], 3)

    def test_create_compress(self):
        # A compressed array (backed by BLZ)
        a = blaze.array(np.arange(1,4), caps={'compress': True})
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertEqual(dd_as_py(a._data), [1, 2, 3])
        # XXX The tests below still do not work
        # self.assertEqual(a[0], 1)
        # self.assertEqual(a[1], 2)
        # self.assertEqual(a[2], 3)

    def test_create_iter(self):
        # A simple 1D array
        a = blaze.array(i for i in range(10))
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertEqual(a.dshape, datashape.dshape('10 * int32'))
        self.assertEqual(dd_as_py(a._data), list(range(10)))
        # A nested iter
        a = blaze.array((i for i in range(x)) for x in range(5))
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertEqual(a.dshape, datashape.dshape('5 * var * int32'))
        self.assertEqual(dd_as_py(a._data),
                         [[i for i in range(x)] for x in range(5)])
        # A list of iter
        a = blaze.array([range(3), (1.5*x for x in range(4)), iter([-1, 1])])
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertEqual(a.dshape, datashape.dshape('3 * var * float64'))
        self.assertEqual(dd_as_py(a._data),
                         [list(range(3)),
                          [1.5*x for x in range(4)],
                          [-1, 1]])

    def test_create_compress_iter(self):
        # A compressed array (backed by BLZ)
        a = blaze.array((i for i in range(10)), caps={'compress': True})
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertEqual(dd_as_py(a._data), list(range(10)))

    def test_create_zeros(self):
        # A default array
        a = blaze.zeros('10 * int64')
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertEqual(dd_as_py(a._data), [0]*10)

    def test_create_compress_zeros(self):
        # A compressed array (backed by BLZ)
        a = blaze.zeros('10 * int64', caps={'compress': True})
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertEqual(dd_as_py(a._data), [0]*10)

    def test_create_ones(self):
        # A default array
        a = blaze.ones('10 * int64')
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertEqual(dd_as_py(a._data), [1]*10)

    def test_create_compress_ones(self):
        # A compressed array (backed by BLZ)
        a = blaze.ones('10 * int64', caps={'compress': True})
        self.assertTrue(isinstance(a, blaze.Array))
        self.assertEqual(dd_as_py(a._data), [1]*10)

    def test_create_record(self):
        # A simple record array
        a = blaze.array([(10, 3.5), (15, 2.25)],
                        dshape="var * {val: int32, flt: float32}")
        self.assertEqual(dd_as_py(a._data), [{'val': 10, 'flt': 3.5},
                                             {'val': 15, 'flt': 2.25}])
        # Test field access via attributes
        aval = a.val
        self.assertEqual(dd_as_py(aval._data), [10, 15])
        aflt = a.flt
        self.assertEqual(dd_as_py(aflt._data), [3.5, 2.25])
class TestPersistent(MayBeUriTest, unittest.TestCase):
    """Tests for arrays persisted to BLZ storage under a URI root.

    ``self.rooturi`` is presumably provided by MayBeUriTest -- confirm
    against blaze.tests.common.
    """

    uri = True

    def test_create(self):
        # A freshly-created empty persistent array has shape (0,).
        persist = blaze.Storage(self.rooturi, format="blz")
        a = blaze.array([], 'float64', storage=persist)
        self.assertTrue(isinstance(a, blaze.Array))
        # assertEqual (not assertTrue on a comparison) so failures show both
        # values; also drops a stray debug print that polluted test output.
        self.assertEqual(a.dshape.shape, (0,))
        self.assertEqual(dd_as_py(a._data), [])

    def test_append(self):
        # Appending to a persistent array grows it in place.
        persist = blaze.Storage(self.rooturi, format="blz")
        a = blaze.zeros('0 * float64', storage=persist)
        self.assertTrue(isinstance(a, blaze.Array))
        append(a, list(range(10)))
        self.assertEqual(dd_as_py(a._data), list(range(10)))

    # Using a 1-dim as the internal dimension
    def test_append2(self):
        persist = blaze.Storage(self.rooturi, format="blz")
        a = blaze.empty('0 * 2 * float64', storage=persist)
        self.assertTrue(isinstance(a, blaze.Array))
        lvals = [[i, i*2] for i in range(10)]
        append(a, lvals)
        self.assertEqual(dd_as_py(a._data), lvals)

    def test_open(self):
        # Data written through one handle is visible when the URI is reopened.
        persist = blaze.Storage(self.rooturi, format="blz")
        a = blaze.ones('0 * float64', storage=persist)
        # list(range(...)) for consistency with the other tests (and so the
        # same object shape is appended under both Python 2 and 3).
        append(a, list(range(10)))
        # Re-open the dataset in URI
        a2 = blaze.open(persist)
        self.assertTrue(isinstance(a2, blaze.Array))
        self.assertEqual(dd_as_py(a2._data), list(range(10)))
# Allow running this test module directly (verbosity=2 prints each test name).
if __name__ == '__main__':
    unittest.main(verbosity=2)
| bsd-3-clause |
PX4/Firmware | Tools/px4airframes/xmlout.py | 8 | 2845 | import xml.etree.ElementTree as ET
import codecs
def indent(elem, level=0):
    """Pretty-print helper: recursively add newline/indent whitespace to an
    ElementTree element in place, so serialized XML is human-readable.

    Only text/tail fields that are empty or whitespace-only are touched;
    meaningful text content is left alone.
    """
    pad = "\n" + level*" "
    if len(elem):
        # Element has children: indent its text and each child's tail.
        if not elem.text or not elem.text.strip():
            elem.text = pad + " "
        if not elem.tail or not elem.tail.strip():
            elem.tail = pad
        child = None
        for child in elem:
            indent(child, level+1)
        # The last child's tail closes the parent, so it gets the parent's
        # indentation (one level shallower than the children).
        if not child.tail or not child.tail.strip():
            child.tail = pad
    else:
        # Leaf element: only its tail needs adjusting (never at the root).
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = pad
class XMLOutput():
    """Build an <airframes> XML document from parsed airframe groups.

    ``groups`` is an iterable of group objects exposing GetName/GetImageName/
    GetParams; ``board`` is a board identifier string used to honor per-board
    "exclude" tags (presumably e.g. "CONFIG_ARCH_BOARD_PX4FMU_V2" -- confirm
    against the caller).
    """
    def __init__(self, groups, board):
        xml_parameters = ET.Element("airframes")
        # Fixed schema/version header elements.
        xml_version = ET.SubElement(xml_parameters, "version")
        xml_version.text = "1"
        xml_version = ET.SubElement(xml_parameters, "airframe_version_major")
        xml_version.text = "1"
        xml_version = ET.SubElement(xml_parameters, "airframe_version_minor")
        xml_version.text = "1"
        for group in groups:
            xml_group = ET.SubElement(xml_parameters, "airframe_group")
            xml_group.attrib["name"] = group.GetName()
            xml_group.attrib["image"] = group.GetImageName()
            for param in group.GetParams():
                # check if there is an exclude tag for this airframe
                excluded = False
                for code in param.GetArchCodes():
                    if "CONFIG_ARCH_BOARD_{0}".format(code) == board and param.GetArchValue(code) == "exclude":
                        excluded = True
                if not excluded:
                    #print("generating: {0} {1}".format(param.GetName(), excluded))
                    xml_param = ET.SubElement(xml_group, "airframe")
                    xml_param.attrib["name"] = param.GetName()
                    xml_param.attrib["id"] = param.GetId()
                    xml_param.attrib["maintainer"] = param.GetMaintainer()
                    # Plain field codes become child elements named after the code.
                    for code in param.GetFieldCodes():
                        value = param.GetFieldValue(code)
                        xml_field = ET.SubElement(xml_param, code)
                        xml_field.text = value
                    # Output codes are ';'-separated: first segment is the
                    # element text, remaining "key:value" segments become
                    # attributes on the <output> element.
                    for code in param.GetOutputCodes():
                        value = param.GetOutputValue(code)
                        valstrs = value.split(";")
                        xml_field = ET.SubElement(xml_param, "output")
                        xml_field.attrib["name"] = code
                        for attrib in valstrs[1:]:
                            attribstrs = attrib.split(":")
                            xml_field.attrib[attribstrs[0].strip()] = attribstrs[1].strip()
                        xml_field.text = valstrs[0]
        indent(xml_parameters)
        self.xml_document = ET.ElementTree(xml_parameters)

    def Save(self, filename):
        # Write the assembled tree to disk as UTF-8 XML.
        self.xml_document.write(filename, encoding="UTF-8")
| bsd-3-clause |
ncole458/generator-phonegapsass | node_modules/phonegap/node_modules/cordova/node_modules/plugman/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSVersion.py | 115 | 13925 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handle version information related to Visual Stuio."""
import errno
import os
import re
import subprocess
import sys
import gyp
class VisualStudioVersion(object):
  """Information regarding a version of Visual Studio."""

  def __init__(self, short_name, description,
               solution_version, project_version, flat_sln, uses_vcxproj,
               path, sdk_based, default_toolset=None):
    self.short_name = short_name            # e.g. '2010' or '2010e'
    self.description = description          # human-readable name
    self.solution_version = solution_version    # version string in .sln files
    self.project_version = project_version      # version string in project files
    self.flat_sln = flat_sln                # express editions need flat .sln
    self.uses_vcxproj = uses_vcxproj        # True for VS2010+ project format
    self.path = path                        # VS installation root (or None)
    self.sdk_based = sdk_based              # True when driven by the SDK env
    self.default_toolset = default_toolset  # msbuild toolset, e.g. 'v110'

  def ShortName(self):
    """Get the short name of the version, e.g. '2010e'."""
    return self.short_name

  def Description(self):
    """Get the full description of the version."""
    return self.description

  def SolutionVersion(self):
    """Get the version number of the sln files."""
    return self.solution_version

  def ProjectVersion(self):
    """Get the version number of the vcproj or vcxproj files."""
    return self.project_version

  def FlatSolution(self):
    return self.flat_sln

  def UsesVcxproj(self):
    """Returns true if this version uses a vcxproj file."""
    return self.uses_vcxproj

  def ProjectExtension(self):
    """Returns the file extension for the project."""
    # Conditional expression instead of the fragile `and/or` idiom.
    return '.vcxproj' if self.uses_vcxproj else '.vcproj'

  def Path(self):
    """Returns the path to Visual Studio installation."""
    return self.path

  def ToolPath(self, tool):
    """Returns the path to a given compiler tool. """
    return os.path.normpath(os.path.join(self.path, "VC/bin", tool))

  def DefaultToolset(self):
    """Returns the msbuild toolset version that will be used in the absence
    of a user override."""
    return self.default_toolset

  def SetupScript(self, target_arch):
    """Returns a command (with arguments) to be used to set up the
    environment."""
    # Check if we are running in the SDK command line environment and use
    # the setup script from the SDK if so. |target_arch| should be either
    # 'x86' or 'x64'.
    assert target_arch in ('x86', 'x64')
    sdk_dir = os.environ.get('WindowsSDKDir')
    if self.sdk_based and sdk_dir:
      return [os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')),
              '/' + target_arch]
    else:
      # We don't use VC/vcvarsall.bat for x86 because vcvarsall calls
      # vcvars32, which it can only find if VS??COMNTOOLS is set, which it
      # isn't always.
      if target_arch == 'x86':
        return [os.path.normpath(
            os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))]
      else:
        assert target_arch == 'x64'
        arg = 'x86_amd64'
        if (os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
            os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
          # Use the 64-on-64 compiler if we can.
          arg = 'amd64'
        return [os.path.normpath(
            os.path.join(self.path, 'VC/vcvarsall.bat')), arg]
def _RegistryQueryBase(sysdir, key, value):
"""Use reg.exe to read a particular key.
While ideally we might use the win32 module, we would like gyp to be
python neutral, so for instance cygwin python lacks this module.
Arguments:
sysdir: The system subdirectory to attempt to launch reg.exe from.
key: The registry key to read from.
value: The particular value to read.
Return:
stdout from reg.exe, or None for failure.
"""
# Skip if not on Windows or Python Win32 setup issue
if sys.platform not in ('win32', 'cygwin'):
return None
# Setup params to pass to and attempt to launch reg.exe
cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
'query', key]
if value:
cmd.extend(['/v', value])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
# Note that the error text may be in [1] in some cases
text = p.communicate()[0]
# Check return code from reg.exe; officially 0==success and 1==error
if p.returncode:
return None
return text
def _RegistryQuery(key, value=None):
  """Use reg.exe to read a particular key through _RegistryQueryBase.

  First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
  that fails, it falls back to System32. Sysnative is available on Vista and
  up and available on Windows Server 2003 and XP through KB patch 942589. Note
  that Sysnative will always fail if using 64-bit python due to it being a
  virtual directory and System32 will work correctly in the first place.
  KB 942589 - http://support.microsoft.com/kb/942589/en-us.

  Arguments:
    key: The registry key.
    value: The particular registry value to read (optional).
  Return:
    stdout from reg.exe, or None for failure.
  """
  text = None
  try:
    text = _RegistryQueryBase('Sysnative', key, value)
  except OSError, e:  # Python 2 syntax -- this module targets Python 2.
    if e.errno == errno.ENOENT:
      # Sysnative directory is absent (32-bit OS / pre-patch XP): retry
      # through the real System32.
      text = _RegistryQueryBase('System32', key, value)
    else:
      raise
  return text
def _RegistryGetValue(key, value):
  """Use reg.exe to obtain the value of a registry key.

  Args:
    key: The registry key.
    value: The particular registry value to read.
  Return:
    contents of the registry key's value, or None on failure.
  """
  text = _RegistryQuery(key, value)
  if text:
    # reg.exe output looks like "    <name>    REG_SZ    <data>\r\n";
    # pull out the data portion after the type token.
    match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
    if match:
      return match.group(1)
  return None
def _RegistryKeyExists(key):
  """Use reg.exe to see if a key exists.

  Args:
    key: The registry key to check.
  Return:
    True if the key exists
  """
  # A successful query returns non-empty text; failure returns None.
  return bool(_RegistryQuery(key))
def _CreateVersion(name, path, sdk_based=False):
  """Sets up MSVS project generation.

  Setup is based off the GYP_MSVS_VERSION environment variable or whatever is
  autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is
  passed in that doesn't match a value in versions python will throw a error.
  """
  if path:
    path = os.path.normpath(path)
  # Table of every supported VS release; the 'e' suffix marks an Express
  # edition (flat solutions, same project format as the full edition).
  versions = {
      '2012': VisualStudioVersion('2012',
                                  'Visual Studio 2012',
                                  solution_version='12.00',
                                  project_version='4.0',
                                  flat_sln=False,
                                  uses_vcxproj=True,
                                  path=path,
                                  sdk_based=sdk_based,
                                  default_toolset='v110'),
      '2012e': VisualStudioVersion('2012e',
                                   'Visual Studio 2012',
                                   solution_version='12.00',
                                   project_version='4.0',
                                   flat_sln=True,
                                   uses_vcxproj=True,
                                   path=path,
                                   sdk_based=sdk_based,
                                   default_toolset='v110'),
      '2010': VisualStudioVersion('2010',
                                  'Visual Studio 2010',
                                  solution_version='11.00',
                                  project_version='4.0',
                                  flat_sln=False,
                                  uses_vcxproj=True,
                                  path=path,
                                  sdk_based=sdk_based),
      '2010e': VisualStudioVersion('2010e',
                                   'Visual Studio 2010',
                                   solution_version='11.00',
                                   project_version='4.0',
                                   flat_sln=True,
                                   uses_vcxproj=True,
                                   path=path,
                                   sdk_based=sdk_based),
      '2008': VisualStudioVersion('2008',
                                  'Visual Studio 2008',
                                  solution_version='10.00',
                                  project_version='9.00',
                                  flat_sln=False,
                                  uses_vcxproj=False,
                                  path=path,
                                  sdk_based=sdk_based),
      '2008e': VisualStudioVersion('2008e',
                                   'Visual Studio 2008',
                                   solution_version='10.00',
                                   project_version='9.00',
                                   flat_sln=True,
                                   uses_vcxproj=False,
                                   path=path,
                                   sdk_based=sdk_based),
      '2005': VisualStudioVersion('2005',
                                  'Visual Studio 2005',
                                  solution_version='9.00',
                                  project_version='8.00',
                                  flat_sln=False,
                                  uses_vcxproj=False,
                                  path=path,
                                  sdk_based=sdk_based),
      '2005e': VisualStudioVersion('2005e',
                                   'Visual Studio 2005',
                                   solution_version='9.00',
                                   project_version='8.00',
                                   flat_sln=True,
                                   uses_vcxproj=False,
                                   path=path,
                                   sdk_based=sdk_based),
  }
  # Raises KeyError for unknown names (intentional, per the docstring).
  return versions[str(name)]
def _ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
def _DetectVisualStudioVersions(versions_to_check, force_express):
  """Collect the list of installed visual studio versions.

  Returns:
    A list of visual studio versions installed in descending order of
    usage preference.
    Base this on the registry and a quick check if devenv.exe exists.
    Only versions 8-10 are considered.
    Possibilities are:
      2005(e) - Visual Studio 2005 (8)
      2008(e) - Visual Studio 2008 (9)
      2010(e) - Visual Studio 2010 (10)
      2012(e) - Visual Studio 2012 (11)
    Where (e) is e for express editions of MSVS and blank otherwise.
  """
  version_to_year = {
      '8.0': '2005', '9.0': '2008', '10.0': '2010', '11.0': '2012'}
  versions = []
  for version in versions_to_check:
    # Old method of searching for which VS version is installed
    # We don't use the 2010-encouraged-way because we also want to get the
    # path to the binaries, which it doesn't offer.
    # Both native and Wow6432Node hives are checked, full and Express.
    keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
            r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
            r'HKLM\Software\Microsoft\VCExpress\%s' % version,
            r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
    for index in range(len(keys)):
      path = _RegistryGetValue(keys[index], 'InstallDir')
      if not path:
        continue
      path = _ConvertToCygpath(path)
      # Check for full.
      full_path = os.path.join(path, 'devenv.exe')
      express_path = os.path.join(path, 'vcexpress.exe')
      if not force_express and os.path.exists(full_path):
        # Add this one.
        # InstallDir points at Common7/IDE, hence the '../..' to reach root.
        versions.append(_CreateVersion(version_to_year[version],
            os.path.join(path, '..', '..')))
      # Check for express.
      elif os.path.exists(express_path):
        # Add this one.
        versions.append(_CreateVersion(version_to_year[version] + 'e',
            os.path.join(path, '..', '..')))
    # The old method above does not work when only SDK is installed.
    keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
            r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7']
    for index in range(len(keys)):
      path = _RegistryGetValue(keys[index], version)
      if not path:
        continue
      path = _ConvertToCygpath(path)
      # SDK-only installs are treated as express ('e') and sdk_based.
      versions.append(_CreateVersion(version_to_year[version] + 'e',
          os.path.join(path, '..'), sdk_based=True))
  return versions
def SelectVisualStudioVersion(version='auto'):
  """Select which version of Visual Studio projects to generate.

  Arguments:
    version: Hook to allow caller to force a particular version (vs auto).
  Returns:
    An object representing a visual studio project format version.
  """
  # In auto mode, check environment variable for override.
  if version == 'auto':
    version = os.environ.get('GYP_MSVS_VERSION', 'auto')
  # Maps the requested version name to the registry version strings to probe,
  # in preference order for 'auto'.
  version_map = {
    'auto': ('10.0', '9.0', '8.0', '11.0'),
    '2005': ('8.0',),
    '2005e': ('8.0',),
    '2008': ('9.0',),
    '2008e': ('9.0',),
    '2010': ('10.0',),
    '2010e': ('10.0',),
    '2012': ('11.0',),
    '2012e': ('11.0',),
  }
  override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH')
  if override_path:
    # An explicit install path only makes sense with an SDK/express version.
    msvs_version = os.environ.get('GYP_MSVS_VERSION')
    if not msvs_version or 'e' not in msvs_version:
      raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '
                       'set to an "e" version (e.g. 2010e)')
    return _CreateVersion(msvs_version, override_path, sdk_based=True)
  version = str(version)
  versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
  if not versions:
    if version == 'auto':
      # Default to 2005 if we couldn't find anything
      return _CreateVersion('2005', None)
    else:
      return _CreateVersion(version, None)
  # Detection returns versions in preference order; take the best one.
  return versions[0]
| mit |
blankclemens/tools-iuc | data_managers/data_manager_bowtie_index_builder/data_manager/bowtie_index_builder.py | 10 | 4142 | #!/usr/bin/env python
from __future__ import print_function
import json
import optparse
import os
import subprocess
import sys
import tempfile
# Size (1 MiB) of each read when relaying bowtie-build's stderr.
CHUNK_SIZE = 2**20
# Galaxy data table updated when the caller does not name one explicitly.
DEFAULT_DATA_TABLE_NAME = "bowtie_indexes"
def get_id_name( params, dbkey, fasta_description=None):
    """Resolve the sequence id and display name for a data table entry.

    Falls back through user-supplied value -> FASTA description (name only)
    -> dbkey, taking the first non-empty one.
    """
    # TODO: ensure sequence_id is unique and does not already appear in location file
    param_dict = params['param_dict']
    sequence_id = param_dict['sequence_id'] or dbkey
    sequence_name = param_dict['sequence_name'] or fasta_description or dbkey
    return sequence_id, sequence_name
def build_bowtie_index( data_manager_dict, fasta_filename, params, target_directory, dbkey, sequence_id, sequence_name, data_table_name=DEFAULT_DATA_TABLE_NAME, color_space=False ):
    """Run bowtie-build on *fasta_filename* inside *target_directory* and
    record the new index in *data_manager_dict* under *data_table_name*.

    Exits the process with bowtie-build's return code on failure, after
    relaying its stderr.
    """
    # TODO: allow multiple FASTA input files
    fasta_base_name = os.path.split( fasta_filename )[-1]
    sym_linked_fasta_filename = os.path.join( target_directory, fasta_base_name )
    os.symlink( fasta_filename, sym_linked_fasta_filename )
    # bowtie-build usage: bowtie-build [options] <reference_in> <ebwt_base>.
    # The original code appended <reference_in> twice (three positional
    # arguments), which bowtie-build rejects; pass it exactly once.
    args = [ 'bowtie-build' ]
    if color_space:
        args.append( '-C' )
    args.append( sym_linked_fasta_filename )
    args.append( fasta_base_name )
    tmp_stderr = tempfile.NamedTemporaryFile( prefix="tmp-data-manager-bowtie-index-builder-stderr" )
    # cwd=target_directory so the index files land next to the FASTA symlink.
    proc = subprocess.Popen( args=args, shell=False, cwd=target_directory, stderr=tmp_stderr.fileno() )
    return_code = proc.wait()
    if return_code:
        # Replay captured stderr in chunks, then abort with the tool's code.
        tmp_stderr.flush()
        tmp_stderr.seek(0)
        print("Error building index:", file=sys.stderr)
        while True:
            chunk = tmp_stderr.read( CHUNK_SIZE )
            if not chunk:
                break
            sys.stderr.write( chunk )
        sys.exit( return_code )
    tmp_stderr.close()
    data_table_entry = dict( value=sequence_id, dbkey=dbkey, name=sequence_name, path=fasta_base_name )
    _add_data_table_entry( data_manager_dict, data_table_name, data_table_entry )
def _add_data_table_entry( data_manager_dict, data_table_name, data_table_entry ):
data_manager_dict['data_tables'] = data_manager_dict.get( 'data_tables', {} )
data_manager_dict['data_tables'][ data_table_name ] = data_manager_dict['data_tables'].get( data_table_name, [] )
data_manager_dict['data_tables'][ data_table_name ].append( data_table_entry )
return data_manager_dict
def main():
    """Entry point: parse CLI options and the Galaxy JSON params file, build
    the bowtie index, then write the resulting data table JSON back out."""
    parser = optparse.OptionParser()
    parser.add_option( '-f', '--fasta_filename', dest='fasta_filename', action='store', type="string", default=None, help='fasta_filename' )
    parser.add_option( '-d', '--fasta_dbkey', dest='fasta_dbkey', action='store', type="string", default=None, help='fasta_dbkey' )
    parser.add_option( '-t', '--fasta_description', dest='fasta_description', action='store', type="string", default=None, help='fasta_description' )
    parser.add_option( '-n', '--data_table_name', dest='data_table_name', action='store', type="string", default=None, help='data_table_name' )
    parser.add_option( '-c', '--color_space', dest='color_space', action='store_true', default=False, help='color_space' )
    (options, args) = parser.parse_args()
    filename = args[0]
    # Use context managers so the params file handle is not leaked.
    with open( filename ) as fh:
        params = json.loads( fh.read() )
    target_directory = params[ 'output_data' ][0]['extra_files_path']
    os.mkdir( target_directory )
    data_manager_dict = {}
    dbkey = options.fasta_dbkey
    if dbkey in [ None, '', '?' ]:
        raise Exception( '"%s" is not a valid dbkey. You must specify a valid dbkey.' % ( dbkey ) )
    sequence_id, sequence_name = get_id_name( params, dbkey=dbkey, fasta_description=options.fasta_description )
    # build the index
    build_bowtie_index( data_manager_dict, options.fasta_filename, params, target_directory, dbkey, sequence_id, sequence_name, data_table_name=options.data_table_name or DEFAULT_DATA_TABLE_NAME, color_space=options.color_space )
    # Save info to json file. Text mode ('w'), not 'wb': json.dumps returns
    # str, and writing str to a binary handle crashes under Python 3.
    with open( filename, 'w' ) as fh:
        fh.write( json.dumps( data_manager_dict ) )
| mit |
webhost/namebench | nb_third_party/dns/set.py | 248 | 7843 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""A simple Set class."""
class Set(object):
    """A simple set class.

    Sets are not in Python until 2.3, and rdata are not immutable so
    we cannot use sets.Set anyway. This class implements a subset of
    the 2.3 Set interface using a list as the container.

    @ivar items: A list of the items which are in the set
    @type items: list"""

    __slots__ = ['items']

    def __init__(self, items=None):
        """Initialize the set.

        @param items: the initial set of items
        @type items: any iterable or None
        """
        self.items = []
        if items is not None:
            for item in items:
                self.add(item)

    def __repr__(self):
        # NOTE(review): module path in this string looks stale ("simpleset")
        # but is kept verbatim since callers may compare repr output.
        return "dns.simpleset.Set(%s)" % repr(self.items)

    def add(self, item):
        """Add an item to the set (no-op if already present)."""
        if item not in self.items:
            self.items.append(item)

    def remove(self, item):
        """Remove an item from the set; raises ValueError if absent."""
        self.items.remove(item)

    def discard(self, item):
        """Remove an item from the set if present."""
        try:
            self.items.remove(item)
        except ValueError:
            pass

    def _clone(self):
        """Make a (shallow) copy of the set.

        There is a 'clone protocol' that subclasses of this class
        should use. To make a copy, first call your super's _clone()
        method, and use the object returned as the new instance. Then
        make shallow copies of the attributes defined in the subclass.

        This protocol allows us to write the set algorithms that
        return new instances (e.g. union) once, and keep using them in
        subclasses.
        """
        cls = self.__class__
        obj = cls.__new__(cls)
        obj.items = list(self.items)
        return obj

    def __copy__(self):
        """Make a (shallow) copy of the set."""
        return self._clone()

    def copy(self):
        """Make a (shallow) copy of the set."""
        return self._clone()

    def union_update(self, other):
        """Update the set, adding any elements from other which are not
        already in the set.

        @param other: the collection of items with which to update the set
        @type other: Set object
        """
        if not isinstance(other, Set):
            raise ValueError('other must be a Set instance')
        if self is other:
            return
        for item in other.items:
            self.add(item)

    def intersection_update(self, other):
        """Update the set, removing any elements from other which are not
        in both sets.

        @param other: the collection of items with which to update the set
        @type other: Set object
        """
        if not isinstance(other, Set):
            raise ValueError('other must be a Set instance')
        if self is other:
            return
        # we make a copy of the list so that we can remove items from
        # the list without breaking the iterator.
        for item in list(self.items):
            if item not in other.items:
                self.items.remove(item)

    def difference_update(self, other):
        """Update the set, removing any elements from other which are in
        the set.

        @param other: the collection of items with which to update the set
        @type other: Set object
        """
        if not isinstance(other, Set):
            raise ValueError('other must be a Set instance')
        if self is other:
            # Everything is removed when differencing with ourselves.
            self.items = []
        else:
            for item in other.items:
                self.discard(item)

    def union(self, other):
        """Return a new set which is the union of I{self} and I{other}.

        @param other: the other set
        @type other: Set object
        @rtype: the same type as I{self}
        """
        obj = self._clone()
        obj.union_update(other)
        return obj

    def intersection(self, other):
        """Return a new set which is the intersection of I{self} and I{other}.

        @param other: the other set
        @type other: Set object
        @rtype: the same type as I{self}
        """
        obj = self._clone()
        obj.intersection_update(other)
        return obj

    def difference(self, other):
        """Return a new set which I{self} - I{other}, i.e. the items
        in I{self} which are not also in I{other}.

        @param other: the other set
        @type other: Set object
        @rtype: the same type as I{self}
        """
        obj = self._clone()
        obj.difference_update(other)
        return obj

    def __or__(self, other):
        return self.union(other)

    def __and__(self, other):
        return self.intersection(other)

    def __add__(self, other):
        return self.union(other)

    def __sub__(self, other):
        return self.difference(other)

    def __ior__(self, other):
        self.union_update(other)
        return self

    def __iand__(self, other):
        self.intersection_update(other)
        return self

    def __iadd__(self, other):
        self.union_update(other)
        return self

    def __isub__(self, other):
        self.difference_update(other)
        return self

    def update(self, other):
        """Update the set, adding any elements from other which are not
        already in the set.

        @param other: the collection of items with which to update the set
        @type other: any iterable type"""
        for item in other:
            self.add(item)

    def clear(self):
        """Make the set empty."""
        self.items = []

    def __eq__(self, other):
        # Yes, this is inefficient but the sets we're dealing with are
        # usually quite small, so it shouldn't hurt too much.
        for item in self.items:
            if item not in other.items:
                return False
        for item in other.items:
            if item not in self.items:
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    def __len__(self):
        return len(self.items)

    def __iter__(self):
        return iter(self.items)

    def __getitem__(self, i):
        return self.items[i]

    def __delitem__(self, i):
        del self.items[i]

    # __getslice__/__delslice__ are Python 2 only; harmless no-ops under
    # Python 3, kept for compatibility with the original interface.
    def __getslice__(self, i, j):
        return self.items[i:j]

    def __delslice__(self, i, j):
        del self.items[i:j]

    def issubset(self, other):
        """Is I{self} a subset of I{other}?

        @rtype: bool
        """
        if not isinstance(other, Set):
            raise ValueError('other must be a Set instance')
        for item in self.items:
            if item not in other.items:
                return False
        return True

    def issuperset(self, other):
        """Is I{self} a superset of I{other}?

        @rtype: bool
        """
        if not isinstance(other, Set):
            raise ValueError('other must be a Set instance')
        for item in other.items:
            if item not in self.items:
                return False
        return True
| apache-2.0 |
"""
These are debug machines used for content creators, so they're kind of a cross
between dev machines and AWS machines.
"""

# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
from .aws import *

# Full Django debugging; this settings profile must never reach production.
DEBUG = True
TEMPLATE_DEBUG = True

# Dump outgoing e-mail to the console instead of actually sending it.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'

################################ DEBUG TOOLBAR #################################

INSTALLED_APPS += ('debug_toolbar',)
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)

# Panels displayed by django-debug-toolbar, in display order.
DEBUG_TOOLBAR_PANELS = (
    'debug_toolbar.panels.versions.VersionsPanel',
    'debug_toolbar.panels.timer.TimerPanel',
    'debug_toolbar.panels.settings.SettingsPanel',
    'debug_toolbar.panels.headers.HeadersPanel',
    'debug_toolbar.panels.request.RequestPanel',
    'debug_toolbar.panels.sql.SQLPanel',
    'debug_toolbar.panels.signals.SignalsPanel',
    'debug_toolbar.panels.logging.LoggingPanel',
    'debug_toolbar.panels.profiling.ProfilingPanel',
)
# couchpotato/core/settings.py (from ucrawler/cp-uc)
from __future__ import with_statement
import ConfigParser
from hashlib import md5
from CodernityDB.hash_index import HashIndex
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import mergeDicts, tryInt, tryFloat
class Settings(object):
    """Ini-file backed settings store for the application.

    Wraps a ConfigParser.RawConfigParser around settings.conf, lets plugins
    register option defaults and types, exposes the options over the HTTP
    API ('settings' and 'settings.save'), and persists free-form key/value
    "properties" in the CodernityDB database.
    """

    # Option descriptors registered by plugins, keyed by section name.
    options = {}
    # Declared option types: types[section][option] -> type-name string.
    types = {}

    def __init__(self):
        # Read-only endpoint exposing all options and their current values.
        addApiView('settings', self.view, docs = {
            'desc': 'Return the options and its values of settings.conf. Including the default values and group ordering used on the settings page.',
            'return': {'type': 'object', 'example': """{
    // objects like in __init__.py of plugin
    "options": {
        "moovee" : {
            "groups" : [{
                "description" : "SD movies only",
                "name" : "#alt.binaries.moovee",
                "options" : [{
                    "default" : false,
                    "name" : "enabled",
                    "type" : "enabler"
                }],
                "tab" : "providers"
            }],
            "name" : "moovee"
        }
    },
    // object structured like settings.conf
    "values": {
        "moovee": {
            "enabled": false
        }
    }
}"""}
        })

        # Endpoint used by the settings page to write a single option.
        addApiView('settings.save', self.saveView, docs = {
            'desc': 'Save setting to config file (settings.conf)',
            'params': {
                'section': {'desc': 'The section name in settings.conf'},
                'name': {'desc': 'The option name'},
                'value': {'desc': 'The value you want to save'},
            }
        })

        addEvent('database.setup', self.databaseSetup)

        # Filled in later by setFile(); no configuration is loaded until then.
        self.file = None
        self.p = None
        self.log = None

    def setFile(self, config_file):
        """Attach and read the settings file, then wire up the events."""
        self.file = config_file

        self.p = ConfigParser.RawConfigParser()
        self.p.read(config_file)

        # Imported here (not at module level) to avoid a circular import.
        from couchpotato.core.logger import CPLog
        self.log = CPLog(__name__)

        self.connectEvents()

    def databaseSetup(self):
        # Ensure the hash index over 'property' documents exists.
        fireEvent('database.setup_index', 'property', PropertyIndex)

    def parser(self):
        """Return the underlying RawConfigParser instance."""
        return self.p

    def sections(self):
        """Return the list of section names in the config."""
        return self.p.sections()

    def connectEvents(self):
        # Let other components query, extend and persist settings via events.
        addEvent('settings.options', self.addOptions)
        addEvent('settings.register', self.registerDefaults)
        addEvent('settings.save', self.save)

    def registerDefaults(self, section_name, options = None, save = True):
        """Register a plugin's default option values and types.

        Existing values in settings.conf are kept; only missing options get
        their defaults.  Options carrying a 'migrate_from' key are moved
        over from the old section they used to live in.
        """
        if not options: options = {}

        self.addSection(section_name)

        for option_name, option in options.items():
            self.setDefault(section_name, option_name, option.get('default', ''))

            # Migrate old settings from old location to the new location
            if option.get('migrate_from'):
                if self.p.has_option(option.get('migrate_from'), option_name):
                    previous_value = self.p.get(option.get('migrate_from'), option_name)
                    self.p.set(section_name, option_name, previous_value)
                    self.p.remove_option(option.get('migrate_from'), option_name)

            if option.get('type'):
                self.setType(section_name, option_name, option.get('type'))

        if save:
            self.save()

    def set(self, section, option, value):
        """Set a raw option value in memory (does not write the file)."""
        return self.p.set(section, option, value)

    def get(self, option = '', section = 'core', default = None, type = None):
        """Return an option coerced through its registered type.

        Falls back to the explicit `type` argument, then to unicode; any
        failure yields `default`.
        """
        try:
            try: type = self.types[section][option]
            except: type = 'unicode' if not type else type

            # Dispatch to getBool/getInt/getFloat/... by type name.
            if hasattr(self, 'get%s' % type.capitalize()):
                return getattr(self, 'get%s' % type.capitalize())(section, option)
            else:
                return self.getUnicode(section, option)
        except:
            return default

    def delete(self, option = '', section = 'core'):
        """Remove an option and write the config file immediately."""
        self.p.remove_option(section, option)
        self.save()

    def getEnabler(self, section, option):
        # 'enabler'-typed options are plain booleans.
        return self.getBool(section, option)

    def getBool(self, section, option):
        """Boolean option; falls back when getboolean() cannot parse it."""
        try:
            return self.p.getboolean(section, option)
        except:
            # NOTE(review): ConfigParser returns strings, so this fallback
            # only matches values stored in memory as the int 1 via set();
            # confirm whether comparing to '1' was intended instead.
            return self.p.get(section, option) == 1

    def getInt(self, section, option):
        """Integer option; tryInt() fallback for unparsable values."""
        try:
            return self.p.getint(section, option)
        except:
            return tryInt(self.p.get(section, option))

    def getFloat(self, section, option):
        """Float option; tryFloat() fallback for unparsable values."""
        try:
            return self.p.getfloat(section, option)
        except:
            return tryFloat(self.p.get(section, option))

    def getUnicode(self, section, option):
        # Values are stored escape-encoded (see saveView); undo that here.
        value = self.p.get(section, option).decode('unicode_escape')
        return toUnicode(value).strip()

    def getValues(self):
        """Return {section: {option: value}} for every stored option.

        Password-typed values are masked with '*' characters of the same
        length so they are never shipped to the frontend in clear text.
        """
        values = {}
        for section in self.sections():
            values[section] = {}
            for option in self.p.items(section):
                (option_name, option_value) = option

                is_password = False
                try: is_password = self.types[section][option_name] == 'password'
                except: pass

                values[section][option_name] = self.get(option_name, section)
                if is_password and values[section][option_name]:
                    values[section][option_name] = len(values[section][option_name]) * '*'

        return values

    def save(self):
        """Write the in-memory configuration back to settings.conf."""
        # 'wb' mode: this is Python 2, where ConfigParser writes str bytes.
        with open(self.file, 'wb') as configfile:
            self.p.write(configfile)

        self.log.debug('Saved settings')

    def addSection(self, section):
        """Create the section if it does not exist yet."""
        if not self.p.has_section(section):
            self.p.add_section(section)

    def setDefault(self, section, option, value):
        """Set a value only when the option is not already present."""
        if not self.p.has_option(section, option):
            self.p.set(section, option, value)

    def setType(self, section, option, type):
        """Record the declared type of an option (consumed by get())."""
        if not self.types.get(section):
            self.types[section] = {}

        self.types[section][option] = type

    def addOptions(self, section_name, options):
        """Merge a plugin's option descriptors into the registry."""
        if not self.options.get(section_name):
            self.options[section_name] = options
        else:
            self.options[section_name] = mergeDicts(self.options[section_name], options)

    def getOptions(self):
        """Return all registered option descriptors."""
        return self.options

    def view(self, **kwargs):
        """API handler: all option descriptors plus their current values."""
        return {
            'options': self.getOptions(),
            'values': self.getValues()
        }

    def saveView(self, **kwargs):
        """API handler: persist one option and fire the save events."""
        section = kwargs.get('section')
        option = kwargs.get('name')
        value = kwargs.get('value')

        # See if a value handler is attached, use that as value
        new_value = fireEvent('setting.save.%s.%s' % (section, option), value, single = True)

        self.set(section, option, (new_value if new_value else value).encode('unicode_escape'))
        self.save()

        # After save (for re-interval etc)
        fireEvent('setting.save.%s.%s.after' % (section, option), single = True)
        fireEvent('setting.save.%s.*.after' % section, single = True)

        return {
            'success': True,
        }

    def getProperty(self, identifier):
        """Fetch a stored property value; None when it does not exist."""
        from couchpotato import get_db
        db = get_db()
        prop = None
        try:
            propert = db.get('property', identifier, with_doc = True)
            prop = propert['doc']['value']
        except:
            pass  # self.log.debug('Property "%s" doesn\'t exist: %s', (identifier, traceback.format_exc(0)))

        return prop

    def setProperty(self, identifier, value = ''):
        """Create or update a stored property document."""
        from couchpotato import get_db
        db = get_db()

        try:
            p = db.get('property', identifier, with_doc = True)
            p['doc'].update({
                'identifier': identifier,
                'value': toUnicode(value),
            })
            db.update(p['doc'])
        except:
            # Lookup failed (document missing): insert a fresh one.
            db.insert({
                '_t': 'property',
                'identifier': identifier,
                'value': toUnicode(value),
            })
class PropertyIndex(HashIndex):
    """CodernityDB hash index over documents whose '_t' is 'property',
    keyed by the md5 hex digest of their identifier."""

    _version = 1

    def __init__(self, *args, **kwargs):
        # Index keys are 32-character md5 hex digests.
        kwargs['key_format'] = '32s'
        super(PropertyIndex, self).__init__(*args, **kwargs)

    def make_key(self, key):
        """Turn a lookup key into its stored index-key form."""
        return md5(key).hexdigest()

    def make_key_value(self, data):
        """Index only 'property' documents; everything else is skipped."""
        if data.get('_t') != 'property':
            return None
        return md5(data['identifier']).hexdigest(), None
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.