Dataset schema (six string columns per record):

  column     type    value profile
  repo_name  string  length 5-100
  path       string  length 4-375
  copies     string  991 distinct values
  size       string  length 4-7
  content    string  length 666-1M
  license    string  15 distinct values
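A minimal sketch of reading a dataset with this schema through the Hugging Face datasets library; the dataset id "user/code-dump" is a hypothetical placeholder, not the real name of this dump.

# Sketch: load a dataset with the six-column schema above.
# "user/code-dump" is a hypothetical placeholder id.
from datasets import load_dataset

ds = load_dataset("user/code-dump", split="train")
row = ds[0]
# Each row carries the six string columns described above.
print(row["repo_name"], row["path"], row["license"], len(row["content"]))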
ajoubert-mitre/geoq
geoq/maps/migrations/0014_add_map_layer_user_remembered_params.py
5
20522
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding model 'MapLayerUserRememberedParams'
        db.create_table(u'maps_maplayeruserrememberedparams', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('maplayer', self.gf('django.db.models.fields.related.ForeignKey')(related_name='user_saved_params_set', to=orm['maps.MapLayer'])),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='map_layer_saved_params_set', to=orm['auth.User'])),
            ('values', self.gf('jsonfield.fields.JSONField')(null=True, blank=True)),
            ('map', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['maps.Map'], null=True, blank=True)),
        ))
        db.send_create_signal(u'maps', ['MapLayerUserRememberedParams'])

        # Changing field 'Layer.dynamic_params'
        db.alter_column(u'maps_layer', 'dynamic_params', self.gf('jsonfield.fields.JSONField')(null=True))

    def backwards(self, orm):
        # Deleting model 'MapLayerUserRememberedParams'
        db.delete_table(u'maps_maplayeruserrememberedparams')

        # Changing field 'Layer.dynamic_params'
        db.alter_column(u'maps_layer', 'dynamic_params', self.gf('jsonfield.JSONField')(null=True))

    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'core.aoi': {
            'Meta': {'object_name': 'AOI'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'analyst': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'assignee_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
            'assignee_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'job': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'aois'", 'to': u"orm['core.Job']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'polygon': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {}),
            'priority': ('django.db.models.fields.SmallIntegerField', [], {'default': '5', 'max_length': '1'}),
            'properties': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'reviewers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'aoi_reviewers'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'Unassigned'", 'max_length': '15'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'core.job': {
            'Meta': {'ordering': "('-created_at',)", 'object_name': 'Job'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'analysts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'analysts'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
            'assignee_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
            'assignee_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'feature_types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['maps.FeatureType']", 'null': 'True', 'blank': 'True'}),
            'grid': ('django.db.models.fields.CharField', [], {'default': "'usng'", 'max_length': '5'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'map': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maps.Map']", 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'progress': ('django.db.models.fields.SmallIntegerField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'project'", 'to': u"orm['core.Project']"}),
            'properties': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'required_courses': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['training.Training']", 'null': 'True', 'blank': 'True'}),
            'reviewers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'reviewers'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
            'tags': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'teams': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'teams'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.Group']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'core.project': {
            'Meta': {'ordering': "('-created_at',)", 'object_name': 'Project'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'contributors': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'contributors'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'project_admins': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'project_admins'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
            'project_type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'properties': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'maps.feature': {
            'Meta': {'ordering': "('-updated_at', 'aoi')", 'object_name': 'Feature'},
            'analyst': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'aoi': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'features'", 'to': u"orm['core.AOI']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'job': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Job']"}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Project']"}),
            'properties': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'In work'", 'max_length': '15'}),
            'template': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maps.FeatureType']", 'on_delete': 'models.PROTECT'}),
            'the_geom': ('django.contrib.gis.db.models.fields.GeometryField', [], {'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'maps.featuretype': {
            'Meta': {'ordering': "['-category', 'order', 'name', 'id']", 'object_name': 'FeatureType'},
            'category': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '25', 'null': 'True', 'blank': 'True'}),
            'icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'properties': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'style': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '25'})
        },
        u'maps.geoeventssource': {
            'Meta': {'object_name': 'GeoeventsSource'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '500'})
        },
        u'maps.layer': {
            'Meta': {'ordering': "['name']", 'object_name': 'Layer'},
            'additional_domains': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'attribution': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'constraints': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'max_length': '800', 'null': 'True', 'blank': 'True'}),
            'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'downloadableLink': ('django.db.models.fields.URLField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
            'dynamic_params': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'enable_identify': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'extent': ('django.contrib.gis.db.models.fields.PolygonField', [], {'null': 'True', 'blank': 'True'}),
            'fields_to_show': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image_format': ('django.db.models.fields.CharField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'info_format': ('django.db.models.fields.CharField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'layer': ('django.db.models.fields.CharField', [], {'max_length': '800', 'null': 'True', 'blank': 'True'}),
            'layer_info_link': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'layer_params': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'layer_parsing_function': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'refreshrate': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'root_field': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'spatial_reference': ('django.db.models.fields.CharField', [], {'default': "'EPSG:4326'", 'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'styles': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'token': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
            'transparent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '500'})
        },
        u'maps.map': {
            'Meta': {'object_name': 'Map'},
            'center_x': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'blank': 'True'}),
            'center_y': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'max_length': '800', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'projection': ('django.db.models.fields.CharField', [], {'default': "'EPSG:4326'", 'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'zoom': ('django.db.models.fields.IntegerField', [], {'default': '5', 'null': 'True', 'blank': 'True'})
        },
        u'maps.maplayer': {
            'Meta': {'ordering': "['stack_order']", 'object_name': 'MapLayer'},
            'display_in_layer_switcher': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_base_layer': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'layer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'map_layer_set'", 'to': u"orm['maps.Layer']"}),
            'map': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'map_set'", 'to': u"orm['maps.Map']"}),
            'opacity': ('django.db.models.fields.FloatField', [], {'default': '0.80000000000000004'}),
            'shown': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'stack_order': ('django.db.models.fields.IntegerField', [], {})
        },
        u'maps.maplayeruserrememberedparams': {
            'Meta': {'object_name': 'MapLayerUserRememberedParams'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'map': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maps.Map']", 'null': 'True', 'blank': 'True'}),
            'maplayer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_saved_params_set'", 'to': u"orm['maps.MapLayer']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'map_layer_saved_params_set'", 'to': u"orm['auth.User']"}),
            'values': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'})
        },
        u'training.training': {
            'Meta': {'object_name': 'Training'},
            'category': ('django.db.models.fields.CharField', [], {'default': "'Uncategorized'", 'max_length': '120', 'null': 'True', 'blank': 'True'}),
            'content_link': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'gamification_signals': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
            'primary_contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'quiz_data': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'users_completed': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'users_completed'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"})
        }
    }

    complete_apps = ['maps']
mit
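The migration above predates Django's built-in migration framework. For comparison, a minimal sketch of the same MapLayerUserRememberedParams creation written as a built-in Django (>= 1.7) migration; this is illustrative only, not part of the geoq repo, and the predecessor migration name is a hypothetical placeholder.

# Illustrative sketch: the South forwards() above as a built-in
# Django migration. Not part of geoq; '0013_previous' is hypothetical.
from django.conf import settings
from django.db import migrations, models
import jsonfield.fields


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('maps', '0013_previous'),  # hypothetical predecessor
    ]

    operations = [
        migrations.CreateModel(
            name='MapLayerUserRememberedParams',
            fields=[
                ('id', models.AutoField(primary_key=True)),
                ('values', jsonfield.fields.JSONField(null=True, blank=True)),
                ('map', models.ForeignKey('maps.Map', null=True, blank=True, on_delete=models.CASCADE)),
                ('maplayer', models.ForeignKey('maps.MapLayer', related_name='user_saved_params_set', on_delete=models.CASCADE)),
                ('user', models.ForeignKey(settings.AUTH_USER_MODEL, related_name='map_layer_saved_params_set', on_delete=models.CASCADE)),
            ],
        ),
        migrations.AlterField(
            model_name='layer',
            name='dynamic_params',
            field=jsonfield.fields.JSONField(null=True, blank=True),
        ),
    ]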
DONIKAN/django
django/core/serializers/xml_serializer.py
124
15694
""" XML serializer. """ from __future__ import unicode_literals from collections import OrderedDict from xml.dom import pulldom from xml.sax import handler from xml.sax.expatreader import ExpatParser as _ExpatParser from django.apps import apps from django.conf import settings from django.core.serializers import base from django.db import DEFAULT_DB_ALIAS, models from django.utils.encoding import smart_text from django.utils.xmlutils import ( SimplerXMLGenerator, UnserializableContentError, ) class Serializer(base.Serializer): """ Serializes a QuerySet to XML. """ def indent(self, level): if self.options.get('indent') is not None: self.xml.ignorableWhitespace('\n' + ' ' * self.options.get('indent') * level) def start_serialization(self): """ Start serialization -- open the XML document and the root element. """ self.xml = SimplerXMLGenerator(self.stream, self.options.get("encoding", settings.DEFAULT_CHARSET)) self.xml.startDocument() self.xml.startElement("django-objects", {"version": "1.0"}) def end_serialization(self): """ End serialization -- end the document. """ self.indent(0) self.xml.endElement("django-objects") self.xml.endDocument() def start_object(self, obj): """ Called as each object is handled. """ if not hasattr(obj, "_meta"): raise base.SerializationError("Non-model object (%s) encountered during serialization" % type(obj)) self.indent(1) model = obj._meta.proxy_for_model if obj._deferred else obj.__class__ attrs = OrderedDict([("model", smart_text(model._meta))]) if not self.use_natural_primary_keys or not hasattr(obj, 'natural_key'): obj_pk = obj._get_pk_val() if obj_pk is not None: attrs['pk'] = smart_text(obj_pk) self.xml.startElement("object", attrs) def end_object(self, obj): """ Called after handling all fields for an object. """ self.indent(1) self.xml.endElement("object") def handle_field(self, obj, field): """ Called to handle each field on an object (except for ForeignKeys and ManyToManyFields) """ self.indent(2) self.xml.startElement("field", OrderedDict([ ("name", field.name), ("type", field.get_internal_type()), ])) # Get a "string version" of the object's data. if getattr(obj, field.name) is not None: try: self.xml.characters(field.value_to_string(obj)) except UnserializableContentError: raise ValueError("%s.%s (pk:%s) contains unserializable characters" % ( obj.__class__.__name__, field.name, obj._get_pk_val())) else: self.xml.addQuickElement("None") self.xml.endElement("field") def handle_fk_field(self, obj, field): """ Called to handle a ForeignKey (we need to treat them slightly differently from regular fields). """ self._start_relational_field(field) related_att = getattr(obj, field.get_attname()) if related_att is not None: if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'): related = getattr(obj, field.name) # If related object has a natural key, use it related = related.natural_key() # Iterable natural keys are rolled out as subelements for key_value in related: self.xml.startElement("natural", {}) self.xml.characters(smart_text(key_value)) self.xml.endElement("natural") else: self.xml.characters(smart_text(related_att)) else: self.xml.addQuickElement("None") self.xml.endElement("field") def handle_m2m_field(self, obj, field): """ Called to handle a ManyToManyField. Related objects are only serialized as references to the object's PK (i.e. the related *data* is not dumped, just the relation). 
""" if field.remote_field.through._meta.auto_created: self._start_relational_field(field) if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'): # If the objects in the m2m have a natural key, use it def handle_m2m(value): natural = value.natural_key() # Iterable natural keys are rolled out as subelements self.xml.startElement("object", {}) for key_value in natural: self.xml.startElement("natural", {}) self.xml.characters(smart_text(key_value)) self.xml.endElement("natural") self.xml.endElement("object") else: def handle_m2m(value): self.xml.addQuickElement("object", attrs={ 'pk': smart_text(value._get_pk_val()) }) for relobj in getattr(obj, field.name).iterator(): handle_m2m(relobj) self.xml.endElement("field") def _start_relational_field(self, field): """ Helper to output the <field> element for relational fields """ self.indent(2) self.xml.startElement("field", OrderedDict([ ("name", field.name), ("rel", field.remote_field.__class__.__name__), ("to", smart_text(field.remote_field.model._meta)), ])) class Deserializer(base.Deserializer): """ Deserialize XML. """ def __init__(self, stream_or_string, **options): super(Deserializer, self).__init__(stream_or_string, **options) self.event_stream = pulldom.parse(self.stream, self._make_parser()) self.db = options.pop('using', DEFAULT_DB_ALIAS) self.ignore = options.pop('ignorenonexistent', False) def _make_parser(self): """Create a hardened XML parser (no custom/external entities).""" return DefusedExpatParser() def __next__(self): for event, node in self.event_stream: if event == "START_ELEMENT" and node.nodeName == "object": self.event_stream.expandNode(node) return self._handle_object(node) raise StopIteration def _handle_object(self, node): """ Convert an <object> node to a DeserializedObject. """ # Look up the model using the model loading mechanism. If this fails, # bail. Model = self._get_model_from_node(node, "model") # Start building a data dictionary from the object. data = {} if node.hasAttribute('pk'): data[Model._meta.pk.attname] = Model._meta.pk.to_python( node.getAttribute('pk')) # Also start building a dict of m2m data (this is saved as # {m2m_accessor_attribute : [list_of_related_objects]}) m2m_data = {} field_names = {f.name for f in Model._meta.get_fields()} # Deserialize each field. for field_node in node.getElementsByTagName("field"): # If the field is missing the name attribute, bail (are you # sensing a pattern here?) field_name = field_node.getAttribute("name") if not field_name: raise base.DeserializationError("<field> node is missing the 'name' attribute") # Get the field from the Model. This will raise a # FieldDoesNotExist if, well, the field doesn't exist, which will # be propagated correctly unless ignorenonexistent=True is used. if self.ignore and field_name not in field_names: continue field = Model._meta.get_field(field_name) # As is usually the case, relation fields get the special treatment. if field.remote_field and isinstance(field.remote_field, models.ManyToManyRel): m2m_data[field.name] = self._handle_m2m_field_node(field_node, field) elif field.remote_field and isinstance(field.remote_field, models.ManyToOneRel): data[field.attname] = self._handle_fk_field_node(field_node, field) else: if field_node.getElementsByTagName('None'): value = None else: value = field.to_python(getInnerText(field_node).strip()) data[field.name] = value obj = base.build_instance(Model, data, self.db) # Return a DeserializedObject so that the m2m data has a place to live. 
return base.DeserializedObject(obj, m2m_data) def _handle_fk_field_node(self, node, field): """ Handle a <field> node for a ForeignKey """ # Check if there is a child node named 'None', returning None if so. if node.getElementsByTagName('None'): return None else: if hasattr(field.remote_field.model._default_manager, 'get_by_natural_key'): keys = node.getElementsByTagName('natural') if keys: # If there are 'natural' subelements, it must be a natural key field_value = [getInnerText(k).strip() for k in keys] obj = field.remote_field.model._default_manager.db_manager(self.db).get_by_natural_key(*field_value) obj_pk = getattr(obj, field.remote_field.field_name) # If this is a natural foreign key to an object that # has a FK/O2O as the foreign key, use the FK value if field.remote_field.model._meta.pk.remote_field: obj_pk = obj_pk.pk else: # Otherwise, treat like a normal PK field_value = getInnerText(node).strip() obj_pk = field.remote_field.model._meta.get_field(field.remote_field.field_name).to_python(field_value) return obj_pk else: field_value = getInnerText(node).strip() return field.remote_field.model._meta.get_field(field.remote_field.field_name).to_python(field_value) def _handle_m2m_field_node(self, node, field): """ Handle a <field> node for a ManyToManyField. """ if hasattr(field.remote_field.model._default_manager, 'get_by_natural_key'): def m2m_convert(n): keys = n.getElementsByTagName('natural') if keys: # If there are 'natural' subelements, it must be a natural key field_value = [getInnerText(k).strip() for k in keys] obj_pk = field.remote_field.model._default_manager.db_manager(self.db).get_by_natural_key(*field_value).pk else: # Otherwise, treat like a normal PK value. obj_pk = field.remote_field.model._meta.pk.to_python(n.getAttribute('pk')) return obj_pk else: m2m_convert = lambda n: field.remote_field.model._meta.pk.to_python(n.getAttribute('pk')) return [m2m_convert(c) for c in node.getElementsByTagName("object")] def _get_model_from_node(self, node, attr): """ Helper to look up a model from a <object model=...> or a <field rel=... to=...> node. """ model_identifier = node.getAttribute(attr) if not model_identifier: raise base.DeserializationError( "<%s> node is missing the required '%s' attribute" % (node.nodeName, attr)) try: return apps.get_model(model_identifier) except (LookupError, TypeError): raise base.DeserializationError( "<%s> node has invalid model identifier: '%s'" % (node.nodeName, model_identifier)) def getInnerText(node): """ Get all the inner text of a DOM node (recursively). """ # inspired by http://mail.python.org/pipermail/xml-sig/2005-March/011022.html inner_text = [] for child in node.childNodes: if child.nodeType == child.TEXT_NODE or child.nodeType == child.CDATA_SECTION_NODE: inner_text.append(child.data) elif child.nodeType == child.ELEMENT_NODE: inner_text.extend(getInnerText(child)) else: pass return "".join(inner_text) # Below code based on Christian Heimes' defusedxml class DefusedExpatParser(_ExpatParser): """ An expat parser hardened against XML bomb attacks. 
Forbids DTDs, external entity references """ def __init__(self, *args, **kwargs): _ExpatParser.__init__(self, *args, **kwargs) self.setFeature(handler.feature_external_ges, False) self.setFeature(handler.feature_external_pes, False) def start_doctype_decl(self, name, sysid, pubid, has_internal_subset): raise DTDForbidden(name, sysid, pubid) def entity_decl(self, name, is_parameter_entity, value, base, sysid, pubid, notation_name): raise EntitiesForbidden(name, value, base, sysid, pubid, notation_name) def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name): # expat 1.2 raise EntitiesForbidden(name, None, base, sysid, pubid, notation_name) def external_entity_ref_handler(self, context, base, sysid, pubid): raise ExternalReferenceForbidden(context, base, sysid, pubid) def reset(self): _ExpatParser.reset(self) parser = self._parser parser.StartDoctypeDeclHandler = self.start_doctype_decl parser.EntityDeclHandler = self.entity_decl parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl parser.ExternalEntityRefHandler = self.external_entity_ref_handler class DefusedXmlException(ValueError): """Base exception.""" def __repr__(self): return str(self) class DTDForbidden(DefusedXmlException): """Document type definition is forbidden.""" def __init__(self, name, sysid, pubid): super(DTDForbidden, self).__init__() self.name = name self.sysid = sysid self.pubid = pubid def __str__(self): tpl = "DTDForbidden(name='{}', system_id={!r}, public_id={!r})" return tpl.format(self.name, self.sysid, self.pubid) class EntitiesForbidden(DefusedXmlException): """Entity definition is forbidden.""" def __init__(self, name, value, base, sysid, pubid, notation_name): super(EntitiesForbidden, self).__init__() self.name = name self.value = value self.base = base self.sysid = sysid self.pubid = pubid self.notation_name = notation_name def __str__(self): tpl = "EntitiesForbidden(name='{}', system_id={!r}, public_id={!r})" return tpl.format(self.name, self.sysid, self.pubid) class ExternalReferenceForbidden(DefusedXmlException): """Resolving an external reference is forbidden.""" def __init__(self, context, base, sysid, pubid): super(ExternalReferenceForbidden, self).__init__() self.context = context self.base = base self.sysid = sysid self.pubid = pubid def __str__(self): tpl = "ExternalReferenceForbidden(system_id='{}', public_id={})" return tpl.format(self.sysid, self.pubid)
bsd-3-clause
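In normal use the Serializer/Deserializer above are reached through Django's serializers registry rather than instantiated directly. A short usage sketch; `myapp.models.Article` is a hypothetical model.

# Sketch: round-tripping model instances through the XML serializer
# via Django's public serializers API. `Article` is hypothetical.
from django.core import serializers
from myapp.models import Article

xml = serializers.serialize("xml", Article.objects.all(), indent=2)
for deserialized in serializers.deserialize("xml", xml):
    deserialized.save()  # saves the instance, then its m2m data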
gurneyalex/odoo
addons/payment_test/models/payment_acquirer.py
4
1751
# coding: utf-8
# Part of Odoo. See LICENSE file for full copyright and licensing details.

from datetime import datetime
from uuid import uuid4

from odoo import api, exceptions, fields, models, _


class PaymentAcquirerTest(models.Model):
    _inherit = 'payment.acquirer'

    provider = fields.Selection(selection_add=[('test', 'Test')])

    @api.model
    def create(self, values):
        if values.get('provider') == 'test' and 'state' in values and values.get('state') not in ('test', 'disabled'):
            raise exceptions.UserError(_('This acquirer should not be used for other purposes than testing.'))
        return super(PaymentAcquirerTest, self).create(values)

    def write(self, values):
        if any(rec.provider == 'test' for rec in self) and 'state' in values and values.get('state') not in ('test', 'disabled'):
            raise exceptions.UserError(_('This acquirer should not be used for other purposes than testing.'))
        return super(PaymentAcquirerTest, self).write(values)

    @api.model
    def test_s2s_form_process(self, data):
        """ Return a minimal token to allow proceeding to transaction creation. """
        payment_token = self.env['payment.token'].sudo().create({
            'acquirer_ref': uuid4(),
            'acquirer_id': int(data['acquirer_id']),
            'partner_id': int(data['partner_id']),
            'name': 'XXXXXXXXXXXX%s - %s' % (data['cc_number'][-4:], data['cc_holder_name'])
        })
        return payment_token


class PaymentTransactionTest(models.Model):
    _inherit = 'payment.transaction'

    def test_create(self, values):
        """Automatically set the transaction as successful upon creation. """
        return {'date': datetime.now(), 'state': 'done'}
agpl-3.0
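A sketch of exercising test_s2s_form_process from server-side code, e.g. an Odoo shell or a TransactionCase test; `env` comes from that context, and the ids and card fields below are placeholders.

# Sketch: creating a fake token through the test acquirer. Assumes an
# Odoo environment (`env`); record ids and card data are placeholders.
acquirer = env['payment.acquirer'].search([('provider', '=', 'test')], limit=1)
token = env['payment.acquirer'].test_s2s_form_process({
    'acquirer_id': acquirer.id,
    'partner_id': env.user.partner_id.id,
    'cc_number': '4111111111111111',
    'cc_holder_name': 'Test Holder',
})
# token.name masks the card number: 'XXXXXXXXXXXX1111 - Test Holder'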
BigDataforYou/movie_recommendation_workshop_1
big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/mbcssm.py
1783
19590
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

from .constants import eStart, eError, eItsMe

# BIG5

BIG5_cls = (
    1,1,1,1,1,1,1,1,  # 00 - 07    #allow 0x00 as legal value
    1,1,1,1,1,1,0,0,  # 08 - 0f
    1,1,1,1,1,1,1,1,  # 10 - 17
    1,1,1,0,1,1,1,1,  # 18 - 1f
    1,1,1,1,1,1,1,1,  # 20 - 27
    1,1,1,1,1,1,1,1,  # 28 - 2f
    1,1,1,1,1,1,1,1,  # 30 - 37
    1,1,1,1,1,1,1,1,  # 38 - 3f
    2,2,2,2,2,2,2,2,  # 40 - 47
    2,2,2,2,2,2,2,2,  # 48 - 4f
    2,2,2,2,2,2,2,2,  # 50 - 57
    2,2,2,2,2,2,2,2,  # 58 - 5f
    2,2,2,2,2,2,2,2,  # 60 - 67
    2,2,2,2,2,2,2,2,  # 68 - 6f
    2,2,2,2,2,2,2,2,  # 70 - 77
    2,2,2,2,2,2,2,1,  # 78 - 7f
    4,4,4,4,4,4,4,4,  # 80 - 87
    4,4,4,4,4,4,4,4,  # 88 - 8f
    4,4,4,4,4,4,4,4,  # 90 - 97
    4,4,4,4,4,4,4,4,  # 98 - 9f
    4,3,3,3,3,3,3,3,  # a0 - a7
    3,3,3,3,3,3,3,3,  # a8 - af
    3,3,3,3,3,3,3,3,  # b0 - b7
    3,3,3,3,3,3,3,3,  # b8 - bf
    3,3,3,3,3,3,3,3,  # c0 - c7
    3,3,3,3,3,3,3,3,  # c8 - cf
    3,3,3,3,3,3,3,3,  # d0 - d7
    3,3,3,3,3,3,3,3,  # d8 - df
    3,3,3,3,3,3,3,3,  # e0 - e7
    3,3,3,3,3,3,3,3,  # e8 - ef
    3,3,3,3,3,3,3,3,  # f0 - f7
    3,3,3,3,3,3,3,0   # f8 - ff
)

BIG5_st = (
    eError,eStart,eStart,     3,eError,eError,eError,eError,  #00-07
    eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,  #08-0f
    eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart   #10-17
)

Big5CharLenTable = (0, 1, 1, 2, 0)

Big5SMModel = {'classTable': BIG5_cls, 'classFactor': 5, 'stateTable': BIG5_st,
               'charLenTable': Big5CharLenTable, 'name': 'Big5'}

# CP949

CP949_cls = (
    1,1,1,1,1,1,1,1, 1,1,1,1,1,1,0,0,  # 00 - 0f
    1,1,1,1,1,1,1,1, 1,1,1,0,1,1,1,1,  # 10 - 1f
    1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,  # 20 - 2f
    1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,  # 30 - 3f
    1,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4,  # 40 - 4f
    4,4,5,5,5,5,5,5, 5,5,5,1,1,1,1,1,  # 50 - 5f
    1,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5,  # 60 - 6f
    5,5,5,5,5,5,5,5, 5,5,5,1,1,1,1,1,  # 70 - 7f
    0,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6,  # 80 - 8f
    6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6,  # 90 - 9f
    6,7,7,7,7,7,7,7, 7,7,7,7,7,8,8,8,  # a0 - af
    7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,  # b0 - bf
    7,7,7,7,7,7,9,2, 2,3,2,2,2,2,2,2,  # c0 - cf
    2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,  # d0 - df
    2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,  # e0 - ef
    2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,0,  # f0 - ff
)

CP949_st = (
    #cls=     0      1      2      3      4      5      6      7      8      9
    # previous state =
    eError,eStart,     3,eError,eStart,eStart,     4,     5,eError,     6,  # eStart
    eError,eError,eError,eError,eError,eError,eError,eError,eError,eError,  # eError
    eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,  # eItsMe
    eError,eError,eStart,eStart,eError,eError,eError,eStart,eStart,eStart,  # 3
    eError,eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart,  # 4
    eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart,  # 5
    eError,eStart,eStart,eStart,eStart,eError,eError,eStart,eStart,eStart,  # 6
)

CP949CharLenTable = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2)

CP949SMModel = {'classTable': CP949_cls, 'classFactor': 10, 'stateTable': CP949_st,
                'charLenTable': CP949CharLenTable, 'name': 'CP949'}

# EUC-JP

EUCJP_cls = (
    4,4,4,4,4,4,4,4,  # 00 - 07
    4,4,4,4,4,4,5,5,  # 08 - 0f
    4,4,4,4,4,4,4,4,  # 10 - 17
    4,4,4,5,4,4,4,4,  # 18 - 1f
    4,4,4,4,4,4,4,4,  # 20 - 27
    4,4,4,4,4,4,4,4,  # 28 - 2f
    4,4,4,4,4,4,4,4,  # 30 - 37
    4,4,4,4,4,4,4,4,  # 38 - 3f
    4,4,4,4,4,4,4,4,  # 40 - 47
    4,4,4,4,4,4,4,4,  # 48 - 4f
    4,4,4,4,4,4,4,4,  # 50 - 57
    4,4,4,4,4,4,4,4,  # 58 - 5f
    4,4,4,4,4,4,4,4,  # 60 - 67
    4,4,4,4,4,4,4,4,  # 68 - 6f
    4,4,4,4,4,4,4,4,  # 70 - 77
    4,4,4,4,4,4,4,4,  # 78 - 7f
    5,5,5,5,5,5,5,5,  # 80 - 87
    5,5,5,5,5,5,1,3,  # 88 - 8f
    5,5,5,5,5,5,5,5,  # 90 - 97
    5,5,5,5,5,5,5,5,  # 98 - 9f
    5,2,2,2,2,2,2,2,  # a0 - a7
    2,2,2,2,2,2,2,2,  # a8 - af
    2,2,2,2,2,2,2,2,  # b0 - b7
    2,2,2,2,2,2,2,2,  # b8 - bf
    2,2,2,2,2,2,2,2,  # c0 - c7
    2,2,2,2,2,2,2,2,  # c8 - cf
    2,2,2,2,2,2,2,2,  # d0 - d7
    2,2,2,2,2,2,2,2,  # d8 - df
    0,0,0,0,0,0,0,0,  # e0 - e7
    0,0,0,0,0,0,0,0,  # e8 - ef
    0,0,0,0,0,0,0,0,  # f0 - f7
    0,0,0,0,0,0,0,5   # f8 - ff
)

EUCJP_st = (
         3,     4,     3,     5,eStart,eError,eError,eError,  #00-07
    eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,  #08-0f
    eItsMe,eItsMe,eStart,eError,eStart,eError,eError,eError,  #10-17
    eError,eError,eStart,eError,eError,eError,     3,eError,  #18-1f
         3,eError,eError,eError,eStart,eStart,eStart,eStart   #20-27
)

EUCJPCharLenTable = (2, 2, 2, 3, 1, 0)

EUCJPSMModel = {'classTable': EUCJP_cls, 'classFactor': 6, 'stateTable': EUCJP_st,
                'charLenTable': EUCJPCharLenTable, 'name': 'EUC-JP'}

# EUC-KR

EUCKR_cls = (
    1,1,1,1,1,1,1,1,  # 00 - 07
    1,1,1,1,1,1,0,0,  # 08 - 0f
    1,1,1,1,1,1,1,1,  # 10 - 17
    1,1,1,0,1,1,1,1,  # 18 - 1f
    1,1,1,1,1,1,1,1,  # 20 - 27
    1,1,1,1,1,1,1,1,  # 28 - 2f
    1,1,1,1,1,1,1,1,  # 30 - 37
    1,1,1,1,1,1,1,1,  # 38 - 3f
    1,1,1,1,1,1,1,1,  # 40 - 47
    1,1,1,1,1,1,1,1,  # 48 - 4f
    1,1,1,1,1,1,1,1,  # 50 - 57
    1,1,1,1,1,1,1,1,  # 58 - 5f
    1,1,1,1,1,1,1,1,  # 60 - 67
    1,1,1,1,1,1,1,1,  # 68 - 6f
    1,1,1,1,1,1,1,1,  # 70 - 77
    1,1,1,1,1,1,1,1,  # 78 - 7f
    0,0,0,0,0,0,0,0,  # 80 - 87
    0,0,0,0,0,0,0,0,  # 88 - 8f
    0,0,0,0,0,0,0,0,  # 90 - 97
    0,0,0,0,0,0,0,0,  # 98 - 9f
    0,2,2,2,2,2,2,2,  # a0 - a7
    2,2,2,2,2,3,3,3,  # a8 - af
    2,2,2,2,2,2,2,2,  # b0 - b7
    2,2,2,2,2,2,2,2,  # b8 - bf
    2,2,2,2,2,2,2,2,  # c0 - c7
    2,3,2,2,2,2,2,2,  # c8 - cf
    2,2,2,2,2,2,2,2,  # d0 - d7
    2,2,2,2,2,2,2,2,  # d8 - df
    2,2,2,2,2,2,2,2,  # e0 - e7
    2,2,2,2,2,2,2,2,  # e8 - ef
    2,2,2,2,2,2,2,2,  # f0 - f7
    2,2,2,2,2,2,2,0   # f8 - ff
)

EUCKR_st = (
    eError,eStart,     3,eError,eError,eError,eError,eError,  #00-07
    eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,eStart   #08-0f
)

EUCKRCharLenTable = (0, 1, 2, 0)

EUCKRSMModel = {'classTable': EUCKR_cls, 'classFactor': 4, 'stateTable': EUCKR_st,
                'charLenTable': EUCKRCharLenTable, 'name': 'EUC-KR'}

# EUC-TW

EUCTW_cls = (
    2,2,2,2,2,2,2,2,  # 00 - 07
    2,2,2,2,2,2,0,0,  # 08 - 0f
    2,2,2,2,2,2,2,2,  # 10 - 17
    2,2,2,0,2,2,2,2,  # 18 - 1f
    2,2,2,2,2,2,2,2,  # 20 - 27
    2,2,2,2,2,2,2,2,  # 28 - 2f
    2,2,2,2,2,2,2,2,  # 30 - 37
    2,2,2,2,2,2,2,2,  # 38 - 3f
    2,2,2,2,2,2,2,2,  # 40 - 47
    2,2,2,2,2,2,2,2,  # 48 - 4f
    2,2,2,2,2,2,2,2,  # 50 - 57
    2,2,2,2,2,2,2,2,  # 58 - 5f
    2,2,2,2,2,2,2,2,  # 60 - 67
    2,2,2,2,2,2,2,2,  # 68 - 6f
    2,2,2,2,2,2,2,2,  # 70 - 77
    2,2,2,2,2,2,2,2,  # 78 - 7f
    0,0,0,0,0,0,0,0,  # 80 - 87
    0,0,0,0,0,0,6,0,  # 88 - 8f
    0,0,0,0,0,0,0,0,  # 90 - 97
    0,0,0,0,0,0,0,0,  # 98 - 9f
    0,3,4,4,4,4,4,4,  # a0 - a7
    5,5,1,1,1,1,1,1,  # a8 - af
    1,1,1,1,1,1,1,1,  # b0 - b7
    1,1,1,1,1,1,1,1,  # b8 - bf
    1,1,3,1,3,3,3,3,  # c0 - c7
    3,3,3,3,3,3,3,3,  # c8 - cf
    3,3,3,3,3,3,3,3,  # d0 - d7
    3,3,3,3,3,3,3,3,  # d8 - df
    3,3,3,3,3,3,3,3,  # e0 - e7
    3,3,3,3,3,3,3,3,  # e8 - ef
    3,3,3,3,3,3,3,3,  # f0 - f7
    3,3,3,3,3,3,3,0   # f8 - ff
)

EUCTW_st = (
    eError,eError,eStart,     3,     3,     3,     4,eError,  #00-07
    eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,  #08-0f
    eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eStart,eError,  #10-17
    eStart,eStart,eStart,eError,eError,eError,eError,eError,  #18-1f
         5,eError,eError,eError,eStart,eError,eStart,eStart,  #20-27
    eStart,eError,eStart,eStart,eStart,eStart,eStart,eStart   #28-2f
)

EUCTWCharLenTable = (0, 0, 1, 2, 2, 2, 3)

EUCTWSMModel = {'classTable': EUCTW_cls, 'classFactor': 7, 'stateTable': EUCTW_st,
                'charLenTable': EUCTWCharLenTable, 'name': 'x-euc-tw'}

# GB2312

GB2312_cls = (
    1,1,1,1,1,1,1,1,  # 00 - 07
    1,1,1,1,1,1,0,0,  # 08 - 0f
    1,1,1,1,1,1,1,1,  # 10 - 17
    1,1,1,0,1,1,1,1,  # 18 - 1f
    1,1,1,1,1,1,1,1,  # 20 - 27
    1,1,1,1,1,1,1,1,  # 28 - 2f
    3,3,3,3,3,3,3,3,  # 30 - 37
    3,3,1,1,1,1,1,1,  # 38 - 3f
    2,2,2,2,2,2,2,2,  # 40 - 47
    2,2,2,2,2,2,2,2,  # 48 - 4f
    2,2,2,2,2,2,2,2,  # 50 - 57
    2,2,2,2,2,2,2,2,  # 58 - 5f
    2,2,2,2,2,2,2,2,  # 60 - 67
    2,2,2,2,2,2,2,2,  # 68 - 6f
    2,2,2,2,2,2,2,2,  # 70 - 77
    2,2,2,2,2,2,2,4,  # 78 - 7f
    5,6,6,6,6,6,6,6,  # 80 - 87
    6,6,6,6,6,6,6,6,  # 88 - 8f
    6,6,6,6,6,6,6,6,  # 90 - 97
    6,6,6,6,6,6,6,6,  # 98 - 9f
    6,6,6,6,6,6,6,6,  # a0 - a7
    6,6,6,6,6,6,6,6,  # a8 - af
    6,6,6,6,6,6,6,6,  # b0 - b7
    6,6,6,6,6,6,6,6,  # b8 - bf
    6,6,6,6,6,6,6,6,  # c0 - c7
    6,6,6,6,6,6,6,6,  # c8 - cf
    6,6,6,6,6,6,6,6,  # d0 - d7
    6,6,6,6,6,6,6,6,  # d8 - df
    6,6,6,6,6,6,6,6,  # e0 - e7
    6,6,6,6,6,6,6,6,  # e8 - ef
    6,6,6,6,6,6,6,6,  # f0 - f7
    6,6,6,6,6,6,6,0   # f8 - ff
)

GB2312_st = (
    eError,eStart,eStart,eStart,eStart,eStart,     3,eError,  #00-07
    eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,  #08-0f
    eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,  #10-17
         4,eError,eStart,eStart,eError,eError,eError,eError,  #18-1f
    eError,eError,     5,eError,eError,eError,eItsMe,eError,  #20-27
    eError,eError,eStart,eStart,eStart,eStart,eStart,eStart   #28-2f
)

# To be accurate, the length of class 6 can be either 2 or 4.
# But it is not necessary to discriminate between the two since
# it is used for frequency analysis only, and we are validating
# each code range there as well. So it is safe to set it to be
# 2 here.
GB2312CharLenTable = (0, 1, 1, 1, 1, 1, 2)

GB2312SMModel = {'classTable': GB2312_cls, 'classFactor': 7, 'stateTable': GB2312_st,
                 'charLenTable': GB2312CharLenTable, 'name': 'GB2312'}

# Shift_JIS

SJIS_cls = (
    1,1,1,1,1,1,1,1,  # 00 - 07
    1,1,1,1,1,1,0,0,  # 08 - 0f
    1,1,1,1,1,1,1,1,  # 10 - 17
    1,1,1,0,1,1,1,1,  # 18 - 1f
    1,1,1,1,1,1,1,1,  # 20 - 27
    1,1,1,1,1,1,1,1,  # 28 - 2f
    1,1,1,1,1,1,1,1,  # 30 - 37
    1,1,1,1,1,1,1,1,  # 38 - 3f
    2,2,2,2,2,2,2,2,  # 40 - 47
    2,2,2,2,2,2,2,2,  # 48 - 4f
    2,2,2,2,2,2,2,2,  # 50 - 57
    2,2,2,2,2,2,2,2,  # 58 - 5f
    2,2,2,2,2,2,2,2,  # 60 - 67
    2,2,2,2,2,2,2,2,  # 68 - 6f
    2,2,2,2,2,2,2,2,  # 70 - 77
    2,2,2,2,2,2,2,1,  # 78 - 7f
    3,3,3,3,3,2,2,3,  # 80 - 87
    3,3,3,3,3,3,3,3,  # 88 - 8f
    3,3,3,3,3,3,3,3,  # 90 - 97
    3,3,3,3,3,3,3,3,  # 98 - 9f
    #0xa0 is illegal in sjis encoding, but some pages do
    #contain such bytes. We need to be more error-forgiving.
    2,2,2,2,2,2,2,2,  # a0 - a7
    2,2,2,2,2,2,2,2,  # a8 - af
    2,2,2,2,2,2,2,2,  # b0 - b7
    2,2,2,2,2,2,2,2,  # b8 - bf
    2,2,2,2,2,2,2,2,  # c0 - c7
    2,2,2,2,2,2,2,2,  # c8 - cf
    2,2,2,2,2,2,2,2,  # d0 - d7
    2,2,2,2,2,2,2,2,  # d8 - df
    3,3,3,3,3,3,3,3,  # e0 - e7
    3,3,3,3,3,4,4,4,  # e8 - ef
    3,3,3,3,3,3,3,3,  # f0 - f7
    3,3,3,3,3,0,0,0)  # f8 - ff

SJIS_st = (
    eError,eStart,eStart,     3,eError,eError,eError,eError,  #00-07
    eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,  #08-0f
    eItsMe,eItsMe,eError,eError,eStart,eStart,eStart,eStart   #10-17
)

SJISCharLenTable = (0, 1, 1, 2, 0, 0)

SJISSMModel = {'classTable': SJIS_cls, 'classFactor': 6, 'stateTable': SJIS_st,
               'charLenTable': SJISCharLenTable, 'name': 'Shift_JIS'}

# UCS2-BE

UCS2BE_cls = (
    0,0,0,0,0,0,0,0,  # 00 - 07
    0,0,1,0,0,2,0,0,  # 08 - 0f
    0,0,0,0,0,0,0,0,  # 10 - 17
    0,0,0,3,0,0,0,0,  # 18 - 1f
    0,0,0,0,0,0,0,0,  # 20 - 27
    0,3,3,3,3,3,0,0,  # 28 - 2f
    0,0,0,0,0,0,0,0,  # 30 - 37
    0,0,0,0,0,0,0,0,  # 38 - 3f
    0,0,0,0,0,0,0,0,  # 40 - 47
    0,0,0,0,0,0,0,0,  # 48 - 4f
    0,0,0,0,0,0,0,0,  # 50 - 57
    0,0,0,0,0,0,0,0,  # 58 - 5f
    0,0,0,0,0,0,0,0,  # 60 - 67
    0,0,0,0,0,0,0,0,  # 68 - 6f
    0,0,0,0,0,0,0,0,  # 70 - 77
    0,0,0,0,0,0,0,0,  # 78 - 7f
    0,0,0,0,0,0,0,0,  # 80 - 87
    0,0,0,0,0,0,0,0,  # 88 - 8f
    0,0,0,0,0,0,0,0,  # 90 - 97
    0,0,0,0,0,0,0,0,  # 98 - 9f
    0,0,0,0,0,0,0,0,  # a0 - a7
    0,0,0,0,0,0,0,0,  # a8 - af
    0,0,0,0,0,0,0,0,  # b0 - b7
    0,0,0,0,0,0,0,0,  # b8 - bf
    0,0,0,0,0,0,0,0,  # c0 - c7
    0,0,0,0,0,0,0,0,  # c8 - cf
    0,0,0,0,0,0,0,0,  # d0 - d7
    0,0,0,0,0,0,0,0,  # d8 - df
    0,0,0,0,0,0,0,0,  # e0 - e7
    0,0,0,0,0,0,0,0,  # e8 - ef
    0,0,0,0,0,0,0,0,  # f0 - f7
    0,0,0,0,0,0,4,5   # f8 - ff
)

UCS2BE_st = (
         5,     7,     7,eError,     4,     3,eError,eError,  #00-07
    eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,  #08-0f
    eItsMe,eItsMe,     6,     6,     6,     6,eError,eError,  #10-17
         6,     6,     6,     6,     6,eItsMe,     6,     6,  #18-1f
         6,     6,     6,     6,     5,     7,     7,eError,  #20-27
         5,     8,     6,     6,eError,     6,     6,     6,  #28-2f
         6,     6,     6,     6,eError,eError,eStart,eStart   #30-37
)

UCS2BECharLenTable = (2, 2, 2, 0, 2, 2)

UCS2BESMModel = {'classTable': UCS2BE_cls, 'classFactor': 6, 'stateTable': UCS2BE_st,
                 'charLenTable': UCS2BECharLenTable, 'name': 'UTF-16BE'}

# UCS2-LE

UCS2LE_cls = (
    0,0,0,0,0,0,0,0,  # 00 - 07
    0,0,1,0,0,2,0,0,  # 08 - 0f
    0,0,0,0,0,0,0,0,  # 10 - 17
    0,0,0,3,0,0,0,0,  # 18 - 1f
    0,0,0,0,0,0,0,0,  # 20 - 27
    0,3,3,3,3,3,0,0,  # 28 - 2f
    0,0,0,0,0,0,0,0,  # 30 - 37
    0,0,0,0,0,0,0,0,  # 38 - 3f
    0,0,0,0,0,0,0,0,  # 40 - 47
    0,0,0,0,0,0,0,0,  # 48 - 4f
    0,0,0,0,0,0,0,0,  # 50 - 57
    0,0,0,0,0,0,0,0,  # 58 - 5f
    0,0,0,0,0,0,0,0,  # 60 - 67
    0,0,0,0,0,0,0,0,  # 68 - 6f
    0,0,0,0,0,0,0,0,  # 70 - 77
    0,0,0,0,0,0,0,0,  # 78 - 7f
    0,0,0,0,0,0,0,0,  # 80 - 87
    0,0,0,0,0,0,0,0,  # 88 - 8f
    0,0,0,0,0,0,0,0,  # 90 - 97
    0,0,0,0,0,0,0,0,  # 98 - 9f
    0,0,0,0,0,0,0,0,  # a0 - a7
    0,0,0,0,0,0,0,0,  # a8 - af
    0,0,0,0,0,0,0,0,  # b0 - b7
    0,0,0,0,0,0,0,0,  # b8 - bf
    0,0,0,0,0,0,0,0,  # c0 - c7
    0,0,0,0,0,0,0,0,  # c8 - cf
    0,0,0,0,0,0,0,0,  # d0 - d7
    0,0,0,0,0,0,0,0,  # d8 - df
    0,0,0,0,0,0,0,0,  # e0 - e7
    0,0,0,0,0,0,0,0,  # e8 - ef
    0,0,0,0,0,0,0,0,  # f0 - f7
    0,0,0,0,0,0,4,5   # f8 - ff
)

UCS2LE_st = (
         6,     6,     7,     6,     4,     3,eError,eError,  #00-07
    eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,  #08-0f
    eItsMe,eItsMe,     5,     5,     5,eError,eItsMe,eError,  #10-17
         5,     5,     5,eError,     5,eError,     6,     6,  #18-1f
         7,     6,     8,     8,     5,     5,     5,eError,  #20-27
         5,     5,     5,eError,eError,eError,     5,     5,  #28-2f
         5,     5,     5,eError,     5,eError,eStart,eStart   #30-37
)

UCS2LECharLenTable = (2, 2, 2, 2, 2, 2)

UCS2LESMModel = {'classTable': UCS2LE_cls, 'classFactor': 6, 'stateTable': UCS2LE_st,
                 'charLenTable': UCS2LECharLenTable, 'name': 'UTF-16LE'}

# UTF-8

UTF8_cls = (
    1,1,1,1,1,1,1,1,  # 00 - 07    #allow 0x00 as a legal value
    1,1,1,1,1,1,0,0,  # 08 - 0f
    1,1,1,1,1,1,1,1,  # 10 - 17
    1,1,1,0,1,1,1,1,  # 18 - 1f
    1,1,1,1,1,1,1,1,  # 20 - 27
    1,1,1,1,1,1,1,1,  # 28 - 2f
    1,1,1,1,1,1,1,1,  # 30 - 37
    1,1,1,1,1,1,1,1,  # 38 - 3f
    1,1,1,1,1,1,1,1,  # 40 - 47
    1,1,1,1,1,1,1,1,  # 48 - 4f
    1,1,1,1,1,1,1,1,  # 50 - 57
    1,1,1,1,1,1,1,1,  # 58 - 5f
    1,1,1,1,1,1,1,1,  # 60 - 67
    1,1,1,1,1,1,1,1,  # 68 - 6f
    1,1,1,1,1,1,1,1,  # 70 - 77
    1,1,1,1,1,1,1,1,  # 78 - 7f
    2,2,2,2,3,3,3,3,  # 80 - 87
    4,4,4,4,4,4,4,4,  # 88 - 8f
    4,4,4,4,4,4,4,4,  # 90 - 97
    4,4,4,4,4,4,4,4,  # 98 - 9f
    5,5,5,5,5,5,5,5,  # a0 - a7
    5,5,5,5,5,5,5,5,  # a8 - af
    5,5,5,5,5,5,5,5,  # b0 - b7
    5,5,5,5,5,5,5,5,  # b8 - bf
    0,0,6,6,6,6,6,6,  # c0 - c7
    6,6,6,6,6,6,6,6,  # c8 - cf
    6,6,6,6,6,6,6,6,  # d0 - d7
    6,6,6,6,6,6,6,6,  # d8 - df
    7,8,8,8,8,8,8,8,  # e0 - e7
    8,8,8,8,8,9,8,8,  # e8 - ef
    10,11,11,11,11,11,11,11,  # f0 - f7
    12,13,13,13,14,15,0,0     # f8 - ff
)

UTF8_st = (
    eError,eStart,eError,eError,eError,eError,    12,    10,  #00-07
         9,    11,     8,     7,     6,     5,     4,     3,  #08-0f
    eError,eError,eError,eError,eError,eError,eError,eError,  #10-17
    eError,eError,eError,eError,eError,eError,eError,eError,  #18-1f
    eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,  #20-27
    eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,  #28-2f
    eError,eError,     5,     5,     5,     5,eError,eError,  #30-37
    eError,eError,eError,eError,eError,eError,eError,eError,  #38-3f
    eError,eError,eError,     5,     5,     5,eError,eError,  #40-47
    eError,eError,eError,eError,eError,eError,eError,eError,  #48-4f
    eError,eError,     7,     7,     7,     7,eError,eError,  #50-57
    eError,eError,eError,eError,eError,eError,eError,eError,  #58-5f
    eError,eError,eError,eError,     7,     7,eError,eError,  #60-67
    eError,eError,eError,eError,eError,eError,eError,eError,  #68-6f
    eError,eError,     9,     9,     9,     9,eError,eError,  #70-77
    eError,eError,eError,eError,eError,eError,eError,eError,  #78-7f
    eError,eError,eError,eError,eError,     9,eError,eError,  #80-87
    eError,eError,eError,eError,eError,eError,eError,eError,  #88-8f
    eError,eError,    12,    12,    12,    12,eError,eError,  #90-97
    eError,eError,eError,eError,eError,eError,eError,eError,  #98-9f
    eError,eError,eError,eError,eError,    12,eError,eError,  #a0-a7
    eError,eError,eError,eError,eError,eError,eError,eError,  #a8-af
    eError,eError,    12,    12,    12,eError,eError,eError,  #b0-b7
    eError,eError,eError,eError,eError,eError,eError,eError,  #b8-bf
    eError,eError,eStart,eStart,eStart,eStart,eError,eError,  #c0-c7
    eError,eError,eError,eError,eError,eError,eError,eError   #c8-cf
)

UTF8CharLenTable = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6)

UTF8SMModel = {'classTable': UTF8_cls, 'classFactor': 16, 'stateTable': UTF8_st,
               'charLenTable': UTF8CharLenTable, 'name': 'UTF-8'}
mit
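A minimal sketch of the state-machine stepping scheme these model dicts feed, mirroring the indexing used by chardet's CodingStateMachine (next = stateTable[state * classFactor + byte_class]). It assumes a chardet 2.x install so `chardet.mbcssm` and `chardet.constants` resolve; in this dump the same modules live under pip's vendored tree.

# Sketch: feeding bytes through one SM model the way chardet's
# CodingStateMachine does. Assumes chardet 2.x on the path.
from chardet.mbcssm import UTF8SMModel
from chardet.constants import eStart, eError, eItsMe

def feed(data, model):
    state = eStart
    for byte in bytearray(data):
        byte_class = model['classTable'][byte]
        state = model['stateTable'][state * model['classFactor'] + byte_class]
        if state == eError:
            return 'rejected'   # byte sequence is illegal for this coding
        if state == eItsMe:
            return 'confirmed'  # an unambiguous signature was seen
    return 'plausible'          # never left the legal transitions

print(feed(u'h\xe9llo'.encode('utf-8'), UTF8SMModel))  # 'plausible'
print(feed(b'\xff\x00', UTF8SMModel))                  # 'rejected' (0xff is class 0)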
damdam-s/OCB
openerp/tools/config.py
10
36945
#openerp.loggers.handlers. -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import ConfigParser import optparse import os import sys import openerp import openerp.conf import openerp.loglevels as loglevels import logging import openerp.release as release import appdirs class MyOption (optparse.Option, object): """ optparse Option with two additional attributes. The list of command line options (getopt.Option) is used to create the list of the configuration file options. When reading the file, and then reading the command line arguments, we don't want optparse.parse results to override the configuration file values. But if we provide default values to optparse, optparse will return them and we can't know if they were really provided by the user or not. A solution is to not use optparse's default attribute, but use a custom one (that will be copied to create the default values of the configuration file). """ def __init__(self, *opts, **attrs): self.my_default = attrs.pop('my_default', None) super(MyOption, self).__init__(*opts, **attrs) DEFAULT_LOG_HANDLER = ':INFO' def _check_ssl(): try: from OpenSSL import SSL import socket return hasattr(socket, 'ssl') and hasattr(SSL, "Connection") except: return False def _get_default_datadir(): home = os.path.expanduser('~') if os.path.exists(home): func = appdirs.user_data_dir else: if sys.platform in ['win32', 'darwin']: func = appdirs.site_data_dir else: func = lambda **kwarg: "/var/lib/%s" % kwarg['appname'].lower() # No "version" kwarg as session and filestore paths are shared against series return func(appname=release.product_name, appauthor=release.author) def _deduplicate_loggers(loggers): """ Avoid saving multiple logging levels for the same loggers to a save file, that just takes space and the list can potentially grow unbounded if for some odd reason people use :option`odoo.py --save`` all the time. """ # dict(iterable) -> the last item of iterable for any given key wins, # which is what we want and expect. Output order should not matter as # there are no duplicates within the output sequence return ( '{}:{}'.format(logger, level) for logger, level in dict(it.split(':') for it in loggers).iteritems() ) class configmanager(object): def __init__(self, fname=None): """Constructor. :param fname: a shortcut allowing to instantiate :class:`configmanager` from Python code without resorting to environment variable """ # Options not exposed on the command line. Command line options will be added # from optparse's parser. 
self.options = { 'admin_passwd': 'admin', 'csv_internal_sep': ',', 'publisher_warranty_url': 'http://services.openerp.com/publisher-warranty/', 'reportgz': False, 'root_path': None, } # Not exposed in the configuration file. self.blacklist_for_save = set([ 'publisher_warranty_url', 'load_language', 'root_path', 'init', 'save', 'config', 'update', 'stop_after_init' ]) # dictionary mapping option destination (keys in self.options) to MyOptions. self.casts = {} self.misc = {} self.config_file = fname self.has_ssl = _check_ssl() self._LOGLEVELS = dict([ (getattr(loglevels, 'LOG_%s' % x), getattr(logging, x)) for x in ('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET') ]) version = "%s %s" % (release.description, release.version) self.parser = parser = optparse.OptionParser(version=version, option_class=MyOption) # Server startup config group = optparse.OptionGroup(parser, "Common options") group.add_option("-c", "--config", dest="config", help="specify alternate config file") group.add_option("-s", "--save", action="store_true", dest="save", default=False, help="save configuration to ~/.openerp_serverrc") group.add_option("-i", "--init", dest="init", help="install one or more modules (comma-separated list, use \"all\" for all modules), requires -d") group.add_option("-u", "--update", dest="update", help="update one or more modules (comma-separated list, use \"all\" for all modules). Requires -d.") group.add_option("--without-demo", dest="without_demo", help="disable loading demo data for modules to be installed (comma-separated, use \"all\" for all modules). Requires -d and -i. Default is %default", my_default=False) group.add_option("-P", "--import-partial", dest="import_partial", my_default='', help="Use this for big data importation, if it crashes you will be able to continue at the current state. Provide a filename to store intermediate importation states.") group.add_option("--pidfile", dest="pidfile", help="file where the server pid will be stored") group.add_option("--addons-path", dest="addons_path", help="specify additional addons paths (separated by commas).", action="callback", callback=self._check_addons_path, nargs=1, type="string") group.add_option("--load", dest="server_wide_modules", help="Comma-separated list of server-wide modules default=web") group.add_option("-D", "--data-dir", dest="data_dir", my_default=_get_default_datadir(), help="Directory where to store Odoo data") parser.add_option_group(group) # XML-RPC / HTTP group = optparse.OptionGroup(parser, "XML-RPC Configuration") group.add_option("--xmlrpc-interface", dest="xmlrpc_interface", my_default='', help="Specify the TCP IP address for the XML-RPC protocol. 
The empty string binds to all interfaces.") group.add_option("--xmlrpc-port", dest="xmlrpc_port", my_default=8069, help="specify the TCP port for the XML-RPC protocol", type="int") group.add_option("--no-xmlrpc", dest="xmlrpc", action="store_false", my_default=True, help="disable the XML-RPC protocol") group.add_option("--proxy-mode", dest="proxy_mode", action="store_true", my_default=False, help="Enable correct behavior when behind a reverse proxy") group.add_option("--longpolling-port", dest="longpolling_port", my_default=8072, help="specify the TCP port for longpolling requests", type="int") parser.add_option_group(group) # XML-RPC / HTTPS title = "XML-RPC Secure Configuration" if not self.has_ssl: title += " (disabled as ssl is unavailable)" group = optparse.OptionGroup(parser, title) group.add_option("--xmlrpcs-interface", dest="xmlrpcs_interface", my_default='', help="Specify the TCP IP address for the XML-RPC Secure protocol. The empty string binds to all interfaces.") group.add_option("--xmlrpcs-port", dest="xmlrpcs_port", my_default=8071, help="specify the TCP port for the XML-RPC Secure protocol", type="int") group.add_option("--no-xmlrpcs", dest="xmlrpcs", action="store_false", my_default=True, help="disable the XML-RPC Secure protocol") group.add_option("--cert-file", dest="secure_cert_file", my_default='server.cert', help="specify the certificate file for the SSL connection") group.add_option("--pkey-file", dest="secure_pkey_file", my_default='server.pkey', help="specify the private key file for the SSL connection") parser.add_option_group(group) # WEB group = optparse.OptionGroup(parser, "Web interface Configuration") group.add_option("--db-filter", dest="dbfilter", my_default='.*', help="Filter listed database", metavar="REGEXP") parser.add_option_group(group) # Testing Group group = optparse.OptionGroup(parser, "Testing Configuration") group.add_option("--test-file", dest="test_file", my_default=False, help="Launch a python or YML test file.") group.add_option("--test-report-directory", dest="test_report_directory", my_default=False, help="If set, will save sample of all reports in this directory.") group.add_option("--test-enable", action="store_true", dest="test_enable", my_default=False, help="Enable YAML and unit tests.") group.add_option("--test-commit", action="store_true", dest="test_commit", my_default=False, help="Commit database changes performed by YAML or XML tests.") parser.add_option_group(group) # Logging Group group = optparse.OptionGroup(parser, "Logging Configuration") group.add_option("--logfile", dest="logfile", help="file where the server log will be stored") group.add_option("--logrotate", dest="logrotate", action="store_true", my_default=False, help="enable logfile rotation") group.add_option("--syslog", action="store_true", dest="syslog", my_default=False, help="Send the log to the syslog server") group.add_option('--log-handler', action="append", default=[], my_default=DEFAULT_LOG_HANDLER, metavar="PREFIX:LEVEL", help='setup a handler at LEVEL for a given PREFIX. An empty PREFIX indicates the root logger. This option can be repeated. 
Example: "openerp.orm:DEBUG" or "werkzeug:CRITICAL" (default: ":INFO")') group.add_option('--log-request', action="append_const", dest="log_handler", const="openerp.http.rpc.request:DEBUG", help='shortcut for --log-handler=openerp.http.rpc.request:DEBUG') group.add_option('--log-response', action="append_const", dest="log_handler", const="openerp.http.rpc.response:DEBUG", help='shortcut for --log-handler=openerp.http.rpc.response:DEBUG') group.add_option('--log-web', action="append_const", dest="log_handler", const="openerp.http:DEBUG", help='shortcut for --log-handler=openerp.http:DEBUG') group.add_option('--log-sql', action="append_const", dest="log_handler", const="openerp.sql_db:DEBUG", help='shortcut for --log-handler=openerp.sql_db:DEBUG') group.add_option('--log-db', dest='log_db', help="Logging database", my_default=False) group.add_option('--log-db-level', dest='log_db_level', my_default='warning', help="Logging database level") # For backward-compatibility, map the old log levels to something # quite close. levels = [ 'info', 'debug_rpc', 'warn', 'test', 'critical', 'debug_sql', 'error', 'debug', 'debug_rpc_answer', 'notset' ] group.add_option('--log-level', dest='log_level', type='choice', choices=levels, my_default='info', help='specify the level of the logging. Accepted values: %s.' % (levels,)) parser.add_option_group(group) # SMTP Group group = optparse.OptionGroup(parser, "SMTP Configuration") group.add_option('--email-from', dest='email_from', my_default=False, help='specify the SMTP email address for sending email') group.add_option('--smtp', dest='smtp_server', my_default='localhost', help='specify the SMTP server for sending email') group.add_option('--smtp-port', dest='smtp_port', my_default=25, help='specify the SMTP port', type="int") group.add_option('--smtp-ssl', dest='smtp_ssl', action='store_true', my_default=False, help='if passed, SMTP connections will be encrypted with SSL (STARTTLS)') group.add_option('--smtp-user', dest='smtp_user', my_default=False, help='specify the SMTP username for sending email') group.add_option('--smtp-password', dest='smtp_password', my_default=False, help='specify the SMTP password for sending email') parser.add_option_group(group) group = optparse.OptionGroup(parser, "Database related options") group.add_option("-d", "--database", dest="db_name", my_default=False, help="specify the database name") group.add_option("-r", "--db_user", dest="db_user", my_default=False, help="specify the database user name") group.add_option("-w", "--db_password", dest="db_password", my_default=False, help="specify the database password") group.add_option("--pg_path", dest="pg_path", help="specify the pg executable path") group.add_option("--db_host", dest="db_host", my_default=False, help="specify the database host") group.add_option("--db_port", dest="db_port", my_default=False, help="specify the database port", type="int") group.add_option("--db_maxconn", dest="db_maxconn", type='int', my_default=64, help="specify the the maximum number of physical connections to posgresql") group.add_option("--db-template", dest="db_template", my_default="template1", help="specify a custom database template to create a new database") parser.add_option_group(group) group = optparse.OptionGroup(parser, "Internationalisation options", "Use these options to translate Odoo to another language." "See i18n section of the user manual. Option '-d' is mandatory." 
"Option '-l' is mandatory in case of importation" ) group.add_option('--load-language', dest="load_language", help="specifies the languages for the translations you want to be loaded") group.add_option('-l', "--language", dest="language", help="specify the language of the translation file. Use it with --i18n-export or --i18n-import") group.add_option("--i18n-export", dest="translate_out", help="export all sentences to be translated to a CSV file, a PO file or a TGZ archive and exit") group.add_option("--i18n-import", dest="translate_in", help="import a CSV or a PO file with translations and exit. The '-l' option is required.") group.add_option("--i18n-overwrite", dest="overwrite_existing_translations", action="store_true", my_default=False, help="overwrites existing translation terms on updating a module or importing a CSV or a PO file.") group.add_option("--modules", dest="translate_modules", help="specify modules to export. Use in combination with --i18n-export") parser.add_option_group(group) security = optparse.OptionGroup(parser, 'Security-related options') security.add_option('--no-database-list', action="store_false", dest='list_db', my_default=True, help="disable the ability to return the list of databases") parser.add_option_group(security) # Advanced options group = optparse.OptionGroup(parser, "Advanced options") if os.name == 'posix': group.add_option('--auto-reload', dest='auto_reload', action='store_true', my_default=False, help='enable auto reload') group.add_option('--debug', dest='debug_mode', action='store_true', my_default=False, help='enable debug mode') group.add_option("--stop-after-init", action="store_true", dest="stop_after_init", my_default=False, help="stop the server after its initialization") group.add_option("-t", "--timezone", dest="timezone", my_default=False, help="specify reference timezone for the server (e.g. Europe/Brussels") group.add_option("--osv-memory-count-limit", dest="osv_memory_count_limit", my_default=False, help="Force a limit on the maximum number of records kept in the virtual " "osv_memory tables. The default is False, which means no count-based limit.", type="int") group.add_option("--osv-memory-age-limit", dest="osv_memory_age_limit", my_default=1.0, help="Force a limit on the maximum age of records kept in the virtual " "osv_memory tables. This is a decimal value expressed in hours, " "and the default is 1 hour.", type="float") group.add_option("--max-cron-threads", dest="max_cron_threads", my_default=2, help="Maximum number of threads processing concurrently cron jobs (default 2).", type="int") group.add_option("--unaccent", dest="unaccent", my_default=False, action="store_true", help="Use the unaccent function provided by the database when available.") group.add_option("--geoip-db", dest="geoip_database", my_default='/usr/share/GeoIP/GeoLiteCity.dat', help="Absolute path to the GeoIP database file.") parser.add_option_group(group) if os.name == 'posix': group = optparse.OptionGroup(parser, "Multiprocessing options") # TODO sensible default for the three following limits. 
group.add_option("--workers", dest="workers", my_default=0, help="Specify the number of workers, 0 disable prefork mode.", type="int") group.add_option("--limit-memory-soft", dest="limit_memory_soft", my_default=2048 * 1024 * 1024, help="Maximum allowed virtual memory per worker, when reached the worker be reset after the current request (default 671088640 aka 640MB).", type="int") group.add_option("--limit-memory-hard", dest="limit_memory_hard", my_default=2560 * 1024 * 1024, help="Maximum allowed virtual memory per worker, when reached, any memory allocation will fail (default 805306368 aka 768MB).", type="int") group.add_option("--limit-time-cpu", dest="limit_time_cpu", my_default=60, help="Maximum allowed CPU time per request (default 60).", type="int") group.add_option("--limit-time-real", dest="limit_time_real", my_default=120, help="Maximum allowed Real time per request (default 120).", type="int") group.add_option("--limit-request", dest="limit_request", my_default=8192, help="Maximum number of request to be processed per worker (default 8192).", type="int") parser.add_option_group(group) # Copy all optparse options (i.e. MyOption) into self.options. for group in parser.option_groups: for option in group.option_list: if option.dest not in self.options: self.options[option.dest] = option.my_default self.casts[option.dest] = option # generate default config self._parse_config() def parse_config(self, args=None): """ Parse the configuration file (if any) and the command-line arguments. This method initializes openerp.tools.config and openerp.conf (the former should be removed in the furture) with library-wide configuration values. This method must be called before proper usage of this library can be made. Typical usage of this method: openerp.tools.config.parse_config(sys.argv[1:]) """ self._parse_config(args) openerp.netsvc.init_logger() openerp.modules.module.initialize_sys_path() def _parse_config(self, args=None): if args is None: args = [] opt, args = self.parser.parse_args(args) def die(cond, msg): if cond: self.parser.error(msg) # Ensures no illegitimate argument is silently discarded (avoids insidious "hyphen to dash" problem) die(args, "unrecognized parameters: '%s'" % " ".join(args)) die(bool(opt.syslog) and bool(opt.logfile), "the syslog and logfile options are exclusive") die(opt.translate_in and (not opt.language or not opt.db_name), "the i18n-import option cannot be used without the language (-l) and the database (-d) options") die(opt.overwrite_existing_translations and not (opt.translate_in or opt.update), "the i18n-overwrite option cannot be used without the i18n-import option or without the update option") die(opt.translate_out and (not opt.db_name), "the i18n-export option cannot be used without the database (-d) option") # Check if the config file exists (-c used, but not -s) die(not opt.save and opt.config and not os.access(opt.config, os.R_OK), "The config file '%s' selected with -c/--config doesn't exist or is not readable, "\ "use -s/--save if you want to generate it"% opt.config) # place/search the config file on Win32 near the server installation # (../etc from the server) # if the server is run by an unprivileged user, he has to specify location of a config file where he has the rights to write, # else he won't be able to save the configurations, or even to start the server... 
        # TODO use appdirs
        if os.name == 'nt':
            rcfilepath = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), 'openerp-server.conf')
        else:
            rcfilepath = os.path.expanduser('~/.openerp_serverrc')

        self.rcfile = os.path.abspath(
            self.config_file or opt.config or os.environ.get('OPENERP_SERVER') or rcfilepath)
        self.load()

        # Check whether we want to log; if not, the output will go to stdout.
        if self.options['logfile'] in ('None', 'False'):
            self.options['logfile'] = False
        # The same for the pidfile.
        if self.options['pidfile'] in ('None', 'False'):
            self.options['pidfile'] = False

        # If an option is defined on the command line, don't take the
        # config-file value, even if the defined value is None.
        keys = ['xmlrpc_interface', 'xmlrpc_port', 'longpolling_port', 'db_name', 'db_user', 'db_password', 'db_host',
                'db_port', 'db_template', 'logfile', 'pidfile', 'smtp_port',
                'email_from', 'smtp_server', 'smtp_user', 'smtp_password',
                'db_maxconn', 'import_partial', 'addons_path',
                'xmlrpc', 'syslog', 'without_demo', 'timezone',
                'xmlrpcs_interface', 'xmlrpcs_port', 'xmlrpcs',
                'secure_cert_file', 'secure_pkey_file', 'dbfilter', 'log_level', 'log_db', 'log_db_level',
                'geoip_database',
                ]

        for arg in keys:
            # Copy the command-line argument (except the special case for log_handler, due to
            # action=append requiring a real default, so we cannot use the my_default workaround)
            if getattr(opt, arg):
                self.options[arg] = getattr(opt, arg)
            # ... or keep, but cast, the config file value.
            elif isinstance(self.options[arg], basestring) and self.casts[arg].type in optparse.Option.TYPE_CHECKER:
                self.options[arg] = optparse.Option.TYPE_CHECKER[self.casts[arg].type](self.casts[arg], arg, self.options[arg])

        if isinstance(self.options['log_handler'], basestring):
            self.options['log_handler'] = self.options['log_handler'].split(',')
        self.options['log_handler'].extend(opt.log_handler)

        # If an option is defined on the command line but is None, take the
        # config-file value.
        keys = [
            'language', 'translate_out', 'translate_in', 'overwrite_existing_translations',
            'debug_mode', 'smtp_ssl', 'load_language',
            'stop_after_init', 'logrotate', 'without_demo', 'xmlrpc', 'syslog',
            'list_db', 'xmlrpcs', 'proxy_mode',
            'test_file', 'test_enable', 'test_commit', 'test_report_directory',
            'osv_memory_count_limit', 'osv_memory_age_limit', 'max_cron_threads', 'unaccent',
            'data_dir',
        ]

        posix_keys = [
            'auto_reload', 'workers',
            'limit_memory_hard', 'limit_memory_soft',
            'limit_time_cpu', 'limit_time_real', 'limit_request',
        ]

        if os.name == 'posix':
            keys += posix_keys
        else:
            self.options.update(dict.fromkeys(posix_keys, None))

        # Copy the command-line arguments...
        for arg in keys:
            if getattr(opt, arg) is not None:
                self.options[arg] = getattr(opt, arg)
            # ... or keep, but cast, the config file value.
            elif isinstance(self.options[arg], basestring) and self.casts[arg].type in optparse.Option.TYPE_CHECKER:
                self.options[arg] = optparse.Option.TYPE_CHECKER[self.casts[arg].type](self.casts[arg], arg, self.options[arg])

        self.options['root_path'] = os.path.abspath(os.path.expanduser(os.path.expandvars(os.path.dirname(openerp.__file__))))
        if not self.options['addons_path'] or self.options['addons_path'] == 'None':
            default_addons = []
            base_addons = os.path.join(self.options['root_path'], 'addons')
            if os.path.exists(base_addons):
                default_addons.append(base_addons)
            main_addons = os.path.abspath(os.path.join(self.options['root_path'], '../addons'))
            if os.path.exists(main_addons):
                default_addons.append(main_addons)
            self.options['addons_path'] = ','.join(default_addons)
        else:
            self.options['addons_path'] = ",".join(
                    os.path.abspath(os.path.expanduser(os.path.expandvars(x.strip())))
                      for x in self.options['addons_path'].split(','))

        self.options['init'] = opt.init and dict.fromkeys(opt.init.split(','), 1) or {}
        self.options["demo"] = not opt.without_demo and self.options['init'] or {}
        self.options['update'] = opt.update and dict.fromkeys(opt.update.split(','), 1) or {}
        self.options['translate_modules'] = opt.translate_modules and map(lambda m: m.strip(), opt.translate_modules.split(',')) or ['all']
        self.options['translate_modules'].sort()

        # TODO checking the type of the parameters should be done for every
        # parameter, not just the timezone.
        # The call to get_server_timezone() sets the timezone; this should
        # probably be done here.
        if self.options['timezone']:
            # Prevent the timezone from being True. (The config file parsing
            # changes the string 'True' to the boolean value True. It would
            # probably be better to remove that conversion.)
            die(not isinstance(self.options['timezone'], basestring),
                "Invalid timezone value in configuration or environment: %r.\n"
                "Please fix this in your configuration." % (self.options['timezone']))

            # If an explicit TZ was provided in the config, make sure it is known
            try:
                import pytz
                pytz.timezone(self.options['timezone'])
            except pytz.UnknownTimeZoneError:
                die(True, "The specified timezone (%s) is invalid" % self.options['timezone'])
            except:
                # If pytz is missing, don't check the provided TZ, it will be ignored anyway.
                pass

        if opt.pg_path:
            self.options['pg_path'] = opt.pg_path

        if self.options.get('language', False):
            if len(self.options['language']) > 5:
                raise Exception('ERROR: The language code must be at most 5 characters long, e.g.: -l fr_BE')

        if not self.options['db_user']:
            try:
                import getpass
                self.options['db_user'] = getpass.getuser()
            except:
                self.options['db_user'] = None

        die(not self.options['db_user'], 'ERROR: No user specified for the connection to the database')

        if self.options['db_password']:
            if sys.platform == 'win32' and not self.options['db_host']:
                self.options['db_host'] = 'localhost'
            #if self.options['db_host']:
            #    self._generate_pgpassfile()

        if opt.save:
            self.save()

        openerp.conf.addons_paths = self.options['addons_path'].split(',')
        if opt.server_wide_modules:
            openerp.conf.server_wide_modules = map(lambda m: m.strip(), opt.server_wide_modules.split(','))
        else:
            openerp.conf.server_wide_modules = ['web', 'web_kanban']

    def _generate_pgpassfile(self):
        """
        Generate the pgpass file with the parameters from the command line
        (db_host, db_user, db_password).

        Used because pg_dump and pg_restore cannot accept the password on the
        command line.
        """
""" is_win32 = sys.platform == 'win32' if is_win32: filename = os.path.join(os.environ['APPDATA'], 'pgpass.conf') else: filename = os.path.join(os.environ['HOME'], '.pgpass') text_to_add = "%(db_host)s:*:*:%(db_user)s:%(db_password)s" % self.options if os.path.exists(filename): content = [x.strip() for x in file(filename, 'r').readlines()] if text_to_add in content: return fp = file(filename, 'a+') fp.write(text_to_add + "\n") fp.close() if is_win32: try: import _winreg except ImportError: _winreg = None x=_winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE) y = _winreg.OpenKey(x, r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment", 0,_winreg.KEY_ALL_ACCESS) _winreg.SetValueEx(y,"PGPASSFILE", 0, _winreg.REG_EXPAND_SZ, filename ) _winreg.CloseKey(y) _winreg.CloseKey(x) else: import stat os.chmod(filename, stat.S_IRUSR + stat.S_IWUSR) def _is_addons_path(self, path): for f in os.listdir(path): modpath = os.path.join(path, f) if os.path.isdir(modpath): def hasfile(filename): return os.path.isfile(os.path.join(modpath, filename)) if hasfile('__init__.py') and (hasfile('__openerp__.py') or hasfile('__terp__.py')): return True return False def _check_addons_path(self, option, opt, value, parser): ad_paths = [] for path in value.split(','): path = path.strip() res = os.path.abspath(os.path.expanduser(path)) if not os.path.isdir(res): raise optparse.OptionValueError("option %s: no such directory: %r" % (opt, path)) if not self._is_addons_path(res): raise optparse.OptionValueError("option %s: The addons-path %r does not seem to a be a valid Addons Directory!" % (opt, path)) ad_paths.append(res) setattr(parser.values, option.dest, ",".join(ad_paths)) def load(self): p = ConfigParser.ConfigParser() try: p.read([self.rcfile]) for (name,value) in p.items('options'): if value=='True' or value=='true': value = True if value=='False' or value=='false': value = False self.options[name] = value #parse the other sections, as well for sec in p.sections(): if sec == 'options': continue if not self.misc.has_key(sec): self.misc[sec]= {} for (name, value) in p.items(sec): if value=='True' or value=='true': value = True if value=='False' or value=='false': value = False self.misc[sec][name] = value except IOError: pass except ConfigParser.NoSectionError: pass def save(self): p = ConfigParser.ConfigParser() loglevelnames = dict(zip(self._LOGLEVELS.values(), self._LOGLEVELS.keys())) p.add_section('options') for opt in sorted(self.options.keys()): if opt in ('version', 'language', 'translate_out', 'translate_in', 'overwrite_existing_translations', 'init', 'update'): continue if opt in self.blacklist_for_save: continue if opt in ('log_level',): p.set('options', opt, loglevelnames.get(self.options[opt], self.options[opt])) elif opt == 'log_handler': p.set('options', opt, ','.join(_deduplicate_loggers(self.options[opt]))) else: p.set('options', opt, self.options[opt]) for sec in sorted(self.misc.keys()): p.add_section(sec) for opt in sorted(self.misc[sec].keys()): p.set(sec,opt,self.misc[sec][opt]) # try to create the directories and write the file try: rc_exists = os.path.exists(self.rcfile) if not rc_exists and not os.path.exists(os.path.dirname(self.rcfile)): os.makedirs(os.path.dirname(self.rcfile)) try: p.write(file(self.rcfile, 'w')) if not rc_exists: os.chmod(self.rcfile, 0600) except IOError: sys.stderr.write("ERROR: couldn't write the config file\n") except OSError: # what to do if impossible? 
sys.stderr.write("ERROR: couldn't create the config directory\n") def get(self, key, default=None): return self.options.get(key, default) def get_misc(self, sect, key, default=None): return self.misc.get(sect,{}).get(key, default) def __setitem__(self, key, value): self.options[key] = value if key in self.options and isinstance(self.options[key], basestring) and \ key in self.casts and self.casts[key].type in optparse.Option.TYPE_CHECKER: self.options[key] = optparse.Option.TYPE_CHECKER[self.casts[key].type](self.casts[key], key, self.options[key]) def __getitem__(self, key): return self.options[key] @property def addons_data_dir(self): d = os.path.join(self['data_dir'], 'addons', release.series) if not os.path.exists(d): os.makedirs(d, 0700) else: assert os.access(d, os.W_OK), \ "%s: directory is not writable" % d return d @property def session_dir(self): d = os.path.join(self['data_dir'], 'sessions') if not os.path.exists(d): os.makedirs(d, 0700) else: assert os.access(d, os.W_OK), \ "%s: directory is not writable" % d return d def filestore(self, dbname): return os.path.join(self['data_dir'], 'filestore', dbname) config = configmanager() # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
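A minimal usage sketch for the configmanager defined in the file above, assuming an OpenERP/Odoo 8-era checkout where the singleton is importable as openerp.tools.config; the database name and worker count below are illustrative values, not project defaults.

from openerp.tools import config

# Parse arguments exactly as the server entry point would; this also loads
# ~/.openerp_serverrc (or the file given with -c) via self.load().
config.parse_config(['-d', 'mydb', '--workers', '2'])

# Options are exposed dict-style and via get(); type="int" casts declared
# in add_option() have already been applied.
assert config['db_name'] == 'mydb'
assert config.get('workers') == 2

# __setitem__ re-runs the optparse type checker, so a string value for an
# int-typed option is cast immediately.
config['xmlrpc_port'] = '8070'
assert config['xmlrpc_port'] == 8070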
mindbender-studio/setup
bin/windows/python36/Lib/site-packages/pkg_resources/_vendor/packaging/_structures.py
1152
1416
# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. from __future__ import absolute_import, division, print_function class Infinity(object): def __repr__(self): return "Infinity" def __hash__(self): return hash(repr(self)) def __lt__(self, other): return False def __le__(self, other): return False def __eq__(self, other): return isinstance(other, self.__class__) def __ne__(self, other): return not isinstance(other, self.__class__) def __gt__(self, other): return True def __ge__(self, other): return True def __neg__(self): return NegativeInfinity Infinity = Infinity() class NegativeInfinity(object): def __repr__(self): return "-Infinity" def __hash__(self): return hash(repr(self)) def __lt__(self, other): return True def __le__(self, other): return True def __eq__(self, other): return isinstance(other, self.__class__) def __ne__(self, other): return not isinstance(other, self.__class__) def __gt__(self, other): return False def __ge__(self, other): return False def __neg__(self): return Infinity NegativeInfinity = NegativeInfinity()
mit
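A short behavior sketch for the Infinity/NegativeInfinity sentinels above; the import path assumes the vendored copy at the path shown in this record.

from pkg_resources._vendor.packaging._structures import Infinity, NegativeInfinity

# Infinity compares greater than anything that is not Infinity itself...
assert Infinity > 10**9 and Infinity >= "zzz"
assert not (Infinity < 0) and Infinity != 42

# ...and negation flips between the two singletons.
assert -Infinity is NegativeInfinity
assert -NegativeInfinity is Infinity

# This makes them usable as padding sentinels in comparison keys, which is
# how the surrounding packaging code orders versions of unequal length.
assert sorted([Infinity, 3, NegativeInfinity, 7]) == [NegativeInfinity, 3, 7, Infinity]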
devoid/nova
nova/console/rpcapi.py
12
2746
# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Client side of the console RPC API. """ from oslo.config import cfg from oslo import messaging from nova import rpc rpcapi_opts = [ cfg.StrOpt('console_topic', default='console', help='The topic console proxy nodes listen on'), ] CONF = cfg.CONF CONF.register_opts(rpcapi_opts) rpcapi_cap_opt = cfg.StrOpt('console', help='Set a version cap for messages sent to console services') CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels') class ConsoleAPI(object): '''Client side of the console rpc API. API version history: 1.0 - Initial version. 1.1 - Added get_backdoor_port() ... Grizzly and Havana support message version 1.1. So, any changes to existing methods in 1.x after that point should be done such that they can handle the version_cap being set to 1.1. 2.0 - Major API rev for Icehouse ''' VERSION_ALIASES = { 'grizzly': '1.1', 'havana': '1.1', } def __init__(self, topic=None, server=None): super(ConsoleAPI, self).__init__() topic = topic if topic else CONF.console_topic target = messaging.Target(topic=topic, server=server, version='2.0') version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.console, CONF.upgrade_levels.console) self.client = rpc.get_client(target, version_cap=version_cap) def _get_compat_version(self, current, havana_compat): if not self.client.can_send_version(current): return havana_compat return current def add_console(self, ctxt, instance_id): # NOTE(russellb) Havana compat version = self._get_compat_version('2.0', '1.0') cctxt = self.client.prepare(version=version) cctxt.cast(ctxt, 'add_console', instance_id=instance_id) def remove_console(self, ctxt, console_id): # NOTE(russellb) Havana compat version = self._get_compat_version('2.0', '1.0') cctxt = self.client.prepare(version=version) cctxt.cast(ctxt, 'remove_console', console_id=console_id)
apache-2.0
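A hedged sketch of driving the ConsoleAPI client above from inside a Nova service; it assumes rpc.init() has already run in the process, and the instance/console ids are illustrative.

from nova import context
from nova.console import rpcapi as console_rpcapi

ctxt = context.get_admin_context()
console_api = console_rpcapi.ConsoleAPI()

# Both methods are casts (fire-and-forget): the console proxy node
# listening on CONF.console_topic picks them up; nothing is returned.
console_api.add_console(ctxt, instance_id=42)
console_api.remove_console(ctxt, console_id=7)

# With [upgrade_levels] console = havana in nova.conf, _get_compat_version()
# pins outgoing messages to version 1.0 instead of 2.0.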
DivineHime/seishirou
lib/requests/packages/chardet/jpcntx.py
289
19643
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### # This is hiragana 2-char sequence table, the number in each cell represents its frequency category jp2CharContext = ( (0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1), (2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4), (0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2), (0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4), (1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4), (0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3), (0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3), 
(0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3), (0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4), (0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3), (2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4), (0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3), (0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5), (0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3), (2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5), (0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4), (1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4), (0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3), (0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3), (0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3), (0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5), (0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4), (0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5), (0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3), (0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4), (0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4), (0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4), (0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1), 
(0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0), (1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3), (0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0), (0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3), (0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3), (0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5), (0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4), (2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5), (0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3), (0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3), (0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3), (0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3), (0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4), (0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4), (0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2), (0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3), (0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3), (0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3), (0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3), (0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4), (0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3), 
(0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4), (0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3), (0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3), (0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4), (0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4), (0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3), (2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4), (0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4), (0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3), (0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4), (0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4), (1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4), (0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3), (0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2), (0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2), (0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3), (0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3), (0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5), (0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3), (0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4), (1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4), 
(0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1), (0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2), (0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3), (0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1), ) class JapaneseContextAnalysis(object): NUM_OF_CATEGORY = 6 DONT_KNOW = -1 ENOUGH_REL_THRESHOLD = 100 MAX_REL_THRESHOLD = 1000 MINIMUM_DATA_THRESHOLD = 4 def __init__(self): self._total_rel = None self._rel_sample = None self._need_to_skip_char_num = None self._last_char_order = None self._done = None self.reset() def reset(self): self._total_rel = 0 # total sequence received # category counters, each integer counts sequence in its category self._rel_sample = [0] * self.NUM_OF_CATEGORY # if last byte in current buffer is not the last byte of a character, # we need to know how many bytes to skip in next buffer self._need_to_skip_char_num = 0 self._last_char_order = -1 # The order of previous char # If this flag is set to True, detection is done and conclusion has # been made self._done = False def feed(self, byte_str, num_bytes): if self._done: return # The buffer we got is byte oriented, and a character may span in more than one # buffers. In case the last one or two byte in last buffer is not # complete, we record how many byte needed to complete that character # and skip these bytes here. We can choose to record those bytes as # well and analyse the character once it is complete, but since a # character will not make much difference, by simply skipping # this character will simply our logic and improve performance. i = self._need_to_skip_char_num while i < num_bytes: order, char_len = self.get_order(byte_str[i:i + 2]) i += char_len if i > num_bytes: self._need_to_skip_char_num = i - num_bytes self._last_char_order = -1 else: if (order != -1) and (self._last_char_order != -1): self._total_rel += 1 if self._total_rel > self.MAX_REL_THRESHOLD: self._done = True break self._rel_sample[jp2CharContext[self._last_char_order][order]] += 1 self._last_char_order = order def got_enough_data(self): return self._total_rel > self.ENOUGH_REL_THRESHOLD def get_confidence(self): # This is just one way to calculate confidence. It works well for me. 
if self._total_rel > self.MINIMUM_DATA_THRESHOLD: return (self._total_rel - self._rel_sample[0]) / self._total_rel else: return self.DONT_KNOW def get_order(self, byte_str): return -1, 1 class SJISContextAnalysis(JapaneseContextAnalysis): def __init__(self): super(SJISContextAnalysis, self).__init__() self._charset_name = "SHIFT_JIS" @property def charset_name(self): return self._charset_name def get_order(self, byte_str): if not byte_str: return -1, 1 # find out current char's byte length first_char = byte_str[0] if (0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC): char_len = 2 if (first_char == 0x87) or (0xFA <= first_char <= 0xFC): self._charset_name = "CP932" else: char_len = 1 # return its order if it is hiragana if len(byte_str) > 1: second_char = byte_str[1] if (first_char == 202) and (0x9F <= second_char <= 0xF1): return second_char - 0x9F, char_len return -1, char_len class EUCJPContextAnalysis(JapaneseContextAnalysis): def get_order(self, byte_str): if not byte_str: return -1, 1 # find out current char's byte length first_char = byte_str[0] if (first_char == 0x8E) or (0xA1 <= first_char <= 0xFE): char_len = 2 elif first_char == 0x8F: char_len = 3 else: char_len = 1 # return its order if it is hiragana if len(byte_str) > 1: second_char = byte_str[1] if (first_char == 0xA4) and (0xA1 <= second_char <= 0xF3): return second_char - 0xA1, char_len return -1, char_len
gpl-3.0
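A usage sketch for the context analysers above, mirroring how chardet's EUC-JP prober drives them; the import path assumes a standalone chardet install rather than the requests-vendored copy, and the sample text is illustrative.

from chardet.jpcntx import EUCJPContextAnalysis

analyser = EUCJPContextAnalysis()

# Hiragana text in EUC-JP; a bytearray mirrors what the prober passes in,
# so byte indexing yields ints on both Python 2 and 3.
data = bytearray(u'\u3053\u3093\u306b\u3061\u306f'.encode('euc_jp')) * 50
analyser.feed(data, len(data))

if analyser.got_enough_data():  # > ENOUGH_REL_THRESHOLD pairs observed
    # Share of adjacent-hiragana pairs that fall outside category 0
    # ("never seen together"); below MINIMUM_DATA_THRESHOLD the method
    # returns DONT_KNOW (-1) instead.
    print(analyser.get_confidence())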
fuzeman/plex.py
tests/test_preferences.py
1
1457
from plex import Plex from tests.core.helpers import read import responses # Set client configuration defaults Plex.configuration.defaults.server(host='mock') @responses.activate def test_get_all(): responses.add( responses.GET, 'http://mock:32400/:/prefs', body=read('fixtures/prefs.xml'), status=200, content_type='application/xml' ) container = Plex[':/prefs'].get() assert container is not None items = list(container) assert len(items) == 3 # Validate preferences assert items[0].id == "FriendlyName" assert items[0].group == 'general' assert items[1].id == "collectUsageData" assert items[1].group == 'general' assert items[2].id == "FSEventLibraryUpdatesEnabled" assert items[2].group == 'library' @responses.activate def test_get_single(): responses.add( responses.GET, 'http://mock:32400/:/prefs', body=read('fixtures/prefs.xml'), status=200, content_type='application/xml' ) item = Plex[':/prefs'].get('FriendlyName') assert item is not None # Validate preferences assert item.id == "FriendlyName" assert item.group == 'general' @responses.activate def test_set(): responses.add( responses.PUT, 'http://mock:32400/:/prefs', body='', status=200, content_type='application/xml' ) Plex[':/prefs'].set('FriendlyName', 'Mock Server') assert len(responses.calls) == 1
mit
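The mocked tests above exercise the same calls you would make against a live server; a short sketch follows, with the host value being an assumption.

from plex import Plex

# Point the client at a real server instead of the 'mock' host used above.
Plex.configuration.defaults.server(host='127.0.0.1')

# Same preference round-trip as in the tests, minus the responses mocks.
item = Plex[':/prefs'].get('FriendlyName')
print(item.id, item.group)

Plex[':/prefs'].set('FriendlyName', 'Living Room PMS')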
rouault/pycsw
docs/conf.py
3
10581
# -*- coding: utf-8 -*- # ================================================================= # # Authors: Tom Kralidis <tomkralidis@gmail.com> # # Copyright (c) 2015 Tom Kralidis # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation # files (the "Software"), to deal in the Software without # restriction, including without limitation the rights to use, # copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following # conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # # ================================================================= # # pycsw documentation build configuration file, created by # sphinx-quickstart on Fri Aug 2 19:48:50 2013. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'pycsw' authors = u'Tom Kralidis' license = u'This work is licensed under a Creative Commons Attribution 4.0 International License' copyright = u'2010-2015, ' + authors + ' ' + license # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = open('../VERSION.txt').read().strip() # The full version, including alpha/beta/rc tags. release = version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. 
today_fmt = '%Y-%m-%d' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { 'sidebarbgcolor': '#356aa0', 'sidebarlinkcolor': '#ffffff', 'relbarlinkcolor': '#ffffff', 'footerbgcolor': '#356aa0' } # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = 'Documentation' # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = '_static/favicon.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%Y-%m-%dT%H:%M:%SZ' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. html_sidebars = { #'index':'indexsidebar.html', '**':'indexsidebar.html', } # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. html_use_index = False # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. 
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'pycswdoc'


# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'pycsw.tex', u'pycsw Documentation',
   authors, 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'pycsw', u'pycsw Documentation',
     [authors], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'pycsw', u'pycsw Documentation',
   authors, 'pycsw', 'One line description of project.',
   'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'


# -- Options for Epub output ---------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = u'pycsw'
epub_author = authors
epub_publisher = authors
epub_copyright = copyright

# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''

# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''

# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#epub_identifier = ''

# A unique identification for the text.
#epub_uid = ''

# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()

# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []

# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []

# A list of files that should not be packed into the epub file.
#epub_exclude_files = []

# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3

# Allow duplicate toc entries.
#epub_tocdup = True
mit
ayoubg/gem5-graphics
gem5/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/reciprocal_estimation.py
62
2158
# Copyright (c) 2007 The Hewlett-Packard Development Company # All rights reserved. # # The license below extends only to copyright in the software and shall # not be construed as granting a license to any other intellectual # property including but not limited to intellectual property relating # to a hardware implementation of the functionality of the software # licensed hereunder. You may use the software subject to the license # terms below provided that you ensure that this notice is replicated # unmodified and in its entirety in all distributions of the software, # modified or unmodified, in source code or in binary form. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Gabe Black microcode = ''' # RCPPS # RCPSS '''
bsd-3-clause
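The microcode body above is an empty placeholder: RCPPS and RCPSS are listed but unimplemented in this gem5 file. As a rough NumPy illustration (not gem5 code, and not the microcode encoding) of what these instructions compute architecturally: RCPPS produces an approximate reciprocal of every packed single-precision element, RCPSS only of the low element. Real hardware returns an estimate accurate to roughly 12 bits, not the exact 1/x used here.

import numpy as np

def rcpps(xmm):
    """Approximate reciprocal of all packed single-precision elements."""
    return (1.0 / xmm).astype(np.float32)

def rcpss(dst, src):
    """Approximate reciprocal of the low element of src; upper elements
    of dst pass through unchanged."""
    out = dst.copy()
    out[0] = np.float32(1.0) / src[0]
    return out

x = np.array([1.0, 2.0, 4.0, 8.0], dtype=np.float32)
print(rcpps(x))      # ~[1.0, 0.5, 0.25, 0.125]
print(rcpss(x, x))   # low element replaced, rest unchanged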
cmeessen/VelocityConversion
test.py
1
1632
import unittest

import numpy as np

from VelocityConversion import MantleConversion, UnavailableMethodError


def assemblage():
    a = {
        "ol": 0.617,
        "cpx": 0.133,
        "opx": 0.052,
        "gnt": 0.153,
        "jd": 0.045,
        "XFe": 0.11
    }
    return a


class TestVelocityConversion(unittest.TestCase):

    def test_vs_AlphaConst(self):
        MC = MantleConversion()
        MC.LoadArray(np.array([[0, 0, -50e3, 4287.65]]))
        MC.SetVelType('S')
        MC.SetMineralogy(assemblage())
        MC.FillTables()
        MC.CalcPT()
        self.assertAlmostEqual(MC.Result_T[0], 1301.05, 1)
        self.assertAlmostEqual(MC.Result_Rho[0], 3335.2, 1)

    def test_vs_AlphaT(self):
        MC = MantleConversion()
        MC.LoadArray(np.array([[0, 0, -50e3, 4287.65]]))
        MC.SetAlpha('T')
        MC.SetVelType('S')
        MC.SetMineralogy(assemblage())
        MC.FillTables()
        MC.CalcPT()
        self.assertAlmostEqual(MC.Result_T[0], 1321.95, 1)
        self.assertAlmostEqual(MC.Result_Rho[0], 3280.2, 1)

    def test_vs_AlphaPT(self):
        MC = MantleConversion()
        with self.assertRaises(UnavailableMethodError):
            MC.SetAlpha('PT')

    def test_vp_AlphaConst(self):
        MC = MantleConversion()
        MC.LoadArray(np.array([[0, 0, -50e3, 8e3]]))
        MC.SetVelType('P')
        MC.SetMineralogy(assemblage())
        MC.FillTables()
        MC.CalcPT()
        self.assertAlmostEqual(MC.Result_T[0], 1141.9, 1)
        self.assertAlmostEqual(MC.Result_Rho[0], 3347.7, 1)


if __name__ == "__main__":
    unittest.main(verbosity=2, buffer=True)
gpl-3.0
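A minimal usage sketch inferred directly from the tests above: the call sequence is LoadArray -> SetVelType -> SetMineralogy -> FillTables -> CalcPT, with input rows of x, y, z (depth apparently given as negative z in metres) and a velocity in m/s; results land in the Result_T and Result_Rho arrays. This is a distillation of the test code, not separate documentation.

import numpy as np
from VelocityConversion import MantleConversion

MC = MantleConversion()
MC.LoadArray(np.array([[0, 0, -50e3, 4287.65]]))  # x, y, z, v_s
MC.SetVelType('S')
MC.SetMineralogy({"ol": 0.617, "cpx": 0.133, "opx": 0.052,
                  "gnt": 0.153, "jd": 0.045, "XFe": 0.11})
MC.FillTables()
MC.CalcPT()
print(MC.Result_T[0], MC.Result_Rho[0])  # ~1301.05 and ~3335.2 per the tests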
lmazuel/azure-sdk-for-python
azure-mgmt-storage/azure/mgmt/storage/v2017_06_01/models/__init__.py
4
3967
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .operation_display import OperationDisplay
from .dimension import Dimension
from .metric_specification import MetricSpecification
from .service_specification import ServiceSpecification
from .operation import Operation
from .storage_account_check_name_availability_parameters import StorageAccountCheckNameAvailabilityParameters
from .sku_capability import SKUCapability
from .restriction import Restriction
from .sku import Sku
from .check_name_availability_result import CheckNameAvailabilityResult
from .custom_domain import CustomDomain
from .encryption_service import EncryptionService
from .encryption_services import EncryptionServices
from .key_vault_properties import KeyVaultProperties
from .encryption import Encryption
from .virtual_network_rule import VirtualNetworkRule
from .ip_rule import IPRule
from .network_rule_set import NetworkRuleSet
from .identity import Identity
from .storage_account_create_parameters import StorageAccountCreateParameters
from .endpoints import Endpoints
from .storage_account import StorageAccount
from .storage_account_key import StorageAccountKey
from .storage_account_list_keys_result import StorageAccountListKeysResult
from .storage_account_regenerate_key_parameters import StorageAccountRegenerateKeyParameters
from .storage_account_update_parameters import StorageAccountUpdateParameters
from .usage_name import UsageName
from .usage import Usage
from .resource import Resource
from .account_sas_parameters import AccountSasParameters
from .list_account_sas_response import ListAccountSasResponse
from .service_sas_parameters import ServiceSasParameters
from .list_service_sas_response import ListServiceSasResponse
from .operation_paged import OperationPaged
from .sku_paged import SkuPaged
from .storage_account_paged import StorageAccountPaged
from .usage_paged import UsagePaged
from .storage_management_client_enums import (
    ReasonCode,
    SkuName,
    SkuTier,
    Kind,
    Reason,
    KeySource,
    Action,
    State,
    Bypass,
    DefaultAction,
    AccessTier,
    ProvisioningState,
    AccountStatus,
    KeyPermission,
    UsageUnit,
    Services,
    SignedResourceTypes,
    Permissions,
    HttpProtocol,
    SignedResource,
)

__all__ = [
    'OperationDisplay',
    'Dimension',
    'MetricSpecification',
    'ServiceSpecification',
    'Operation',
    'StorageAccountCheckNameAvailabilityParameters',
    'SKUCapability',
    'Restriction',
    'Sku',
    'CheckNameAvailabilityResult',
    'CustomDomain',
    'EncryptionService',
    'EncryptionServices',
    'KeyVaultProperties',
    'Encryption',
    'VirtualNetworkRule',
    'IPRule',
    'NetworkRuleSet',
    'Identity',
    'StorageAccountCreateParameters',
    'Endpoints',
    'StorageAccount',
    'StorageAccountKey',
    'StorageAccountListKeysResult',
    'StorageAccountRegenerateKeyParameters',
    'StorageAccountUpdateParameters',
    'UsageName',
    'Usage',
    'Resource',
    'AccountSasParameters',
    'ListAccountSasResponse',
    'ServiceSasParameters',
    'ListServiceSasResponse',
    'OperationPaged',
    'SkuPaged',
    'StorageAccountPaged',
    'UsagePaged',
    'ReasonCode',
    'SkuName',
    'SkuTier',
    'Kind',
    'Reason',
    'KeySource',
    'Action',
    'State',
    'Bypass',
    'DefaultAction',
    'AccessTier',
    'ProvisioningState',
    'AccountStatus',
    'KeyPermission',
    'UsageUnit',
    'Services',
    'SignedResourceTypes',
    'Permissions',
    'HttpProtocol',
    'SignedResource',
]
mit
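A hedged usage sketch (not part of the package) showing how a few of the models re-exported above are typically combined when creating a storage account; the resource-group and account names are placeholders, and exact constructor signatures may differ between SDK versions.

from azure.mgmt.storage.v2017_06_01.models import (
    Kind, Sku, SkuName, StorageAccountCreateParameters)

params = StorageAccountCreateParameters(
    sku=Sku(name=SkuName.standard_lrs),
    kind=Kind.storage,
    location='eastus',
)
# With an authenticated StorageManagementClient `client` (not shown here):
# client.storage_accounts.create('my_resource_group', 'mystorageacct', params)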
DrMeers/django
django/contrib/admin/views/main.py
5
17274
from collections import OrderedDict
import sys
import warnings

from django.core.exceptions import SuspiciousOperation, ImproperlyConfigured
from django.core.paginator import InvalidPage
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.utils import six
from django.utils.deprecation import RenameMethodsBase
from django.utils.encoding import force_text
from django.utils.translation import ugettext, ugettext_lazy
from django.utils.http import urlencode

from django.contrib.admin import FieldListFilter
from django.contrib.admin.exceptions import DisallowedModelAdminLookup
from django.contrib.admin.options import IncorrectLookupParameters, IS_POPUP_VAR, TO_FIELD_VAR
from django.contrib.admin.utils import (quote, get_fields_from_path,
    lookup_needs_distinct, prepare_lookup_value)

# Changelist settings
ALL_VAR = 'all'
ORDER_VAR = 'o'
ORDER_TYPE_VAR = 'ot'
PAGE_VAR = 'p'
SEARCH_VAR = 'q'
ERROR_FLAG = 'e'

IGNORED_PARAMS = (
    ALL_VAR, ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR, IS_POPUP_VAR, TO_FIELD_VAR)

# Text to display within change-list table cells if the value is blank.
EMPTY_CHANGELIST_VALUE = ugettext_lazy('(None)')


def _is_changelist_popup(request):
    """
    Returns True if the popup GET parameter is set.

    This function is introduced to facilitate deprecating the legacy
    value for IS_POPUP_VAR and should be removed at the end of the
    deprecation cycle.
    """
    if IS_POPUP_VAR in request.GET:
        return True

    IS_LEGACY_POPUP_VAR = 'pop'
    if IS_LEGACY_POPUP_VAR in request.GET:
        warnings.warn(
            "The `%s` GET parameter has been renamed to `%s`." %
            (IS_LEGACY_POPUP_VAR, IS_POPUP_VAR),
            DeprecationWarning, 2)
        return True

    return False


class RenameChangeListMethods(RenameMethodsBase):
    renamed_methods = (
        ('get_query_set', 'get_queryset', DeprecationWarning),
    )


class ChangeList(six.with_metaclass(RenameChangeListMethods)):
    def __init__(self, request, model, list_display, list_display_links,
            list_filter, date_hierarchy, search_fields, list_select_related,
            list_per_page, list_max_show_all, list_editable, model_admin):
        self.model = model
        self.opts = model._meta
        self.lookup_opts = self.opts
        self.root_queryset = model_admin.get_queryset(request)
        self.list_display = list_display
        self.list_display_links = list_display_links
        self.list_filter = list_filter
        self.date_hierarchy = date_hierarchy
        self.search_fields = search_fields
        self.list_select_related = list_select_related
        self.list_per_page = list_per_page
        self.list_max_show_all = list_max_show_all
        self.model_admin = model_admin
        self.preserved_filters = model_admin.get_preserved_filters(request)

        # Get search parameters from the query string.
        try:
            self.page_num = int(request.GET.get(PAGE_VAR, 0))
        except ValueError:
            self.page_num = 0
        self.show_all = ALL_VAR in request.GET
        self.is_popup = _is_changelist_popup(request)
        self.to_field = request.GET.get(TO_FIELD_VAR)
        self.params = dict(request.GET.items())
        if PAGE_VAR in self.params:
            del self.params[PAGE_VAR]
        if ERROR_FLAG in self.params:
            del self.params[ERROR_FLAG]

        if self.is_popup:
            self.list_editable = ()
        else:
            self.list_editable = list_editable
        self.query = request.GET.get(SEARCH_VAR, '')
        self.queryset = self.get_queryset(request)
        self.get_results(request)
        if self.is_popup:
            title = ugettext('Select %s')
        else:
            title = ugettext('Select %s to change')
        self.title = title % force_text(self.opts.verbose_name)
        self.pk_attname = self.lookup_opts.pk.attname

    @property
    def root_query_set(self):
        warnings.warn("`ChangeList.root_query_set` is deprecated, "
                      "use `root_queryset` instead.",
                      DeprecationWarning, 2)
        return self.root_queryset

    @property
    def query_set(self):
        warnings.warn("`ChangeList.query_set` is deprecated, "
                      "use `queryset` instead.",
                      DeprecationWarning, 2)
        return self.queryset

    def get_filters_params(self, params=None):
        """
        Returns all params except IGNORED_PARAMS
        """
        if not params:
            params = self.params
        lookup_params = params.copy()  # a dictionary of the query string
        # Remove all the parameters that are globally and systematically
        # ignored.
        for ignored in IGNORED_PARAMS:
            if ignored in lookup_params:
                del lookup_params[ignored]
        return lookup_params

    def get_filters(self, request):
        lookup_params = self.get_filters_params()
        use_distinct = False

        for key, value in lookup_params.items():
            if not self.model_admin.lookup_allowed(key, value):
                raise DisallowedModelAdminLookup("Filtering by %s not allowed" % key)

        filter_specs = []
        if self.list_filter:
            for list_filter in self.list_filter:
                if callable(list_filter):
                    # This is simply a custom list filter class.
                    spec = list_filter(request, lookup_params,
                        self.model, self.model_admin)
                else:
                    field_path = None
                    if isinstance(list_filter, (tuple, list)):
                        # This is a custom FieldListFilter class for a given field.
                        field, field_list_filter_class = list_filter
                    else:
                        # This is simply a field name, so use the default
                        # FieldListFilter class that has been registered for
                        # the type of the given field.
                        field, field_list_filter_class = list_filter, FieldListFilter.create
                    if not isinstance(field, models.Field):
                        field_path = field
                        field = get_fields_from_path(self.model, field_path)[-1]
                    spec = field_list_filter_class(field, request, lookup_params,
                        self.model, self.model_admin, field_path=field_path)
                    # Check if we need to use distinct()
                    use_distinct = (use_distinct or
                                    lookup_needs_distinct(self.lookup_opts, field_path))
                if spec and spec.has_output():
                    filter_specs.append(spec)

        # At this point, all the parameters used by the various ListFilters
        # have been removed from lookup_params, which now only contains other
        # parameters passed via the query string. We now loop through the
        # remaining parameters both to ensure that all the parameters are valid
        # fields and to determine if at least one of them needs distinct(). If
        # the lookup parameters aren't real fields, then bail out.
        try:
            for key, value in lookup_params.items():
                lookup_params[key] = prepare_lookup_value(key, value)
                use_distinct = (use_distinct or
                                lookup_needs_distinct(self.lookup_opts, key))
            return filter_specs, bool(filter_specs), lookup_params, use_distinct
        except FieldDoesNotExist as e:
            six.reraise(IncorrectLookupParameters, IncorrectLookupParameters(e), sys.exc_info()[2])

    def get_query_string(self, new_params=None, remove=None):
        if new_params is None:
            new_params = {}
        if remove is None:
            remove = []
        p = self.params.copy()
        for r in remove:
            for k in list(p):
                if k.startswith(r):
                    del p[k]
        for k, v in new_params.items():
            if v is None:
                if k in p:
                    del p[k]
            else:
                p[k] = v
        return '?%s' % urlencode(sorted(p.items()))

    def get_results(self, request):
        paginator = self.model_admin.get_paginator(request, self.queryset, self.list_per_page)
        # Get the number of objects, with admin filters applied.
        result_count = paginator.count

        # Get the total number of objects, with no admin filters applied.
        # Perform a slight optimization:
        # full_result_count is equal to paginator.count if no filters
        # were applied
        if self.get_filters_params() or self.params.get(SEARCH_VAR):
            full_result_count = self.root_queryset.count()
        else:
            full_result_count = result_count
        can_show_all = result_count <= self.list_max_show_all
        multi_page = result_count > self.list_per_page

        # Get the list of objects to display on this page.
        if (self.show_all and can_show_all) or not multi_page:
            result_list = self.queryset._clone()
        else:
            try:
                result_list = paginator.page(self.page_num + 1).object_list
            except InvalidPage:
                raise IncorrectLookupParameters

        self.result_count = result_count
        self.full_result_count = full_result_count
        self.result_list = result_list
        self.can_show_all = can_show_all
        self.multi_page = multi_page
        self.paginator = paginator

    def _get_default_ordering(self):
        ordering = []
        if self.model_admin.ordering:
            ordering = self.model_admin.ordering
        elif self.lookup_opts.ordering:
            ordering = self.lookup_opts.ordering
        return ordering

    def get_ordering_field(self, field_name):
        """
        Returns the proper model field name corresponding to the given
        field_name to use for ordering. field_name may either be the name of a
        proper model field or the name of a method (on the admin or model) or a
        callable with the 'admin_order_field' attribute. Returns None if no
        proper model field name can be matched.
        """
        try:
            field = self.lookup_opts.get_field(field_name)
            return field.name
        except models.FieldDoesNotExist:
            # See whether field_name is a name of a non-field
            # that allows sorting.
            if callable(field_name):
                attr = field_name
            elif hasattr(self.model_admin, field_name):
                attr = getattr(self.model_admin, field_name)
            else:
                attr = getattr(self.model, field_name)
            return getattr(attr, 'admin_order_field', None)

    def get_ordering(self, request, queryset):
        """
        Returns the list of ordering fields for the change list.
        First we check the get_ordering() method in model admin, then we check
        the object's default ordering. Then, any manually-specified ordering
        from the query string overrides anything. Finally, a deterministic
        order is guaranteed by ensuring the primary key is used as the last
        ordering field.
        """
        params = self.params
        ordering = list(self.model_admin.get_ordering(request)
                        or self._get_default_ordering())
        if ORDER_VAR in params:
            # Clear ordering and used params
            ordering = []
            order_params = params[ORDER_VAR].split('.')
            for p in order_params:
                try:
                    none, pfx, idx = p.rpartition('-')
                    field_name = self.list_display[int(idx)]
                    order_field = self.get_ordering_field(field_name)
                    if not order_field:
                        continue  # No 'admin_order_field', skip it
                    ordering.append(pfx + order_field)
                except (IndexError, ValueError):
                    continue  # Invalid ordering specified, skip it.

        # Add the given query's ordering fields, if any.
        ordering.extend(queryset.query.order_by)

        # Ensure that the primary key is systematically present in the list of
        # ordering fields so we can guarantee a deterministic order across all
        # database backends.
        pk_name = self.lookup_opts.pk.name
        if not (set(ordering) & set(['pk', '-pk', pk_name, '-' + pk_name])):
            # The two sets do not intersect, meaning the pk isn't present. So
            # we add it.
            ordering.append('-pk')

        return ordering

    def get_ordering_field_columns(self):
        """
        Returns an OrderedDict of ordering field column numbers and asc/desc
        """

        # We must cope with more than one column having the same underlying sort
        # field, so we base things on column numbers.
        ordering = self._get_default_ordering()
        ordering_fields = OrderedDict()
        if ORDER_VAR not in self.params:
            # for ordering specified on ModelAdmin or model Meta, we don't know
            # the right column numbers absolutely, because there might be more
            # than one column associated with that ordering, so we guess.
            for field in ordering:
                if field.startswith('-'):
                    field = field[1:]
                    order_type = 'desc'
                else:
                    order_type = 'asc'
                for index, attr in enumerate(self.list_display):
                    if self.get_ordering_field(attr) == field:
                        ordering_fields[index] = order_type
                        break
        else:
            for p in self.params[ORDER_VAR].split('.'):
                none, pfx, idx = p.rpartition('-')
                try:
                    idx = int(idx)
                except ValueError:
                    continue  # skip it
                ordering_fields[idx] = 'desc' if pfx == '-' else 'asc'
        return ordering_fields

    def get_queryset(self, request):
        # First, we collect all the declared list filters.
        (self.filter_specs, self.has_filters, remaining_lookup_params,
         filters_use_distinct) = self.get_filters(request)

        # Then, we let every list filter modify the queryset to its liking.
        qs = self.root_queryset
        for filter_spec in self.filter_specs:
            new_qs = filter_spec.queryset(request, qs)
            if new_qs is not None:
                qs = new_qs

        try:
            # Finally, we apply the remaining lookup parameters from the query
            # string (i.e. those that haven't already been processed by the
            # filters).
            qs = qs.filter(**remaining_lookup_params)
        except (SuspiciousOperation, ImproperlyConfigured):
            # Allow certain types of errors to be re-raised as-is so that the
            # caller can treat them in a special way.
            raise
        except Exception as e:
            # Every other error is caught with a naked except, because we don't
            # have any other way of validating lookup parameters. They might be
            # invalid if the keyword arguments are incorrect, or if the values
            # are not in the correct type, so we might get FieldError,
            # ValueError, ValidationError, or ?.
            raise IncorrectLookupParameters(e)

        if not qs.query.select_related:
            qs = self.apply_select_related(qs)

        # Set ordering.
        ordering = self.get_ordering(request, qs)
        qs = qs.order_by(*ordering)

        # Apply search results
        qs, search_use_distinct = self.model_admin.get_search_results(
            request, qs, self.query)

        # Remove duplicates from results, if necessary
        if filters_use_distinct | search_use_distinct:
            return qs.distinct()
        else:
            return qs

    def apply_select_related(self, qs):
        if self.list_select_related is True:
            return qs.select_related()

        if self.list_select_related is False:
            if self.has_related_field_in_list_display():
                return qs.select_related()

        if self.list_select_related:
            return qs.select_related(*self.list_select_related)

        return qs

    def has_related_field_in_list_display(self):
        for field_name in self.list_display:
            try:
                field = self.lookup_opts.get_field(field_name)
            except models.FieldDoesNotExist:
                pass
            else:
                if isinstance(field.rel, models.ManyToOneRel):
                    return True
        return False

    def url_for_result(self, result):
        pk = getattr(result, self.pk_attname)
        return reverse('admin:%s_%s_change' % (self.opts.app_label,
                                               self.opts.model_name),
                       args=(quote(pk),),
                       current_app=self.model_admin.admin_site.name)
bsd-3-clause
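A standalone sketch of the query-string merging that ChangeList.get_query_string() implements above: new params override or delete existing ones, `remove` drops every key with a matching prefix, and the survivors are re-encoded in sorted order. The helper name is hypothetical; the logic mirrors the method shown.

from urllib.parse import urlencode

def merge_query(params, new_params=None, remove=None):
    p = dict(params)
    # Drop every key that starts with a removed prefix.
    for prefix in (remove or []):
        for k in list(p):
            if k.startswith(prefix):
                del p[k]
    # None means "delete this key"; anything else overrides it.
    for k, v in (new_params or {}).items():
        if v is None:
            p.pop(k, None)
        else:
            p[k] = v
    return '?%s' % urlencode(sorted(p.items()))

print(merge_query({'o': '1', 'p': '2', 'q': 'x'}, {'p': None}, remove=['o']))
# -> '?q=x'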
ramineni/myironic
ironic/tests/conf_fixture.py
11
1431
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import fixtures
from oslo_config import cfg

from ironic.common import config

CONF = cfg.CONF
CONF.import_opt('host', 'ironic.common.service')


class ConfFixture(fixtures.Fixture):
    """Fixture to manage global conf settings."""

    def __init__(self, conf):
        self.conf = conf

    def setUp(self):
        super(ConfFixture, self).setUp()

        self.conf.set_default('host', 'fake-mini')
        self.conf.set_default('connection', "sqlite://", group='database')
        self.conf.set_default('sqlite_synchronous', False, group='database')
        self.conf.set_default('verbose', True)
        config.parse_args([], default_config_files=[])
        self.addCleanup(self.conf.reset)
apache-2.0
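A minimal sketch of how a fixture like ConfFixture is typically consumed in a test case: useFixture() runs the fixture's setUp() immediately and registers its cleanup, so every test starts from the same configuration defaults. The test case itself is illustrative, not from the ironic tree.

import testtools
from oslo_config import cfg

from ironic.tests.conf_fixture import ConfFixture


class ExampleTestCase(testtools.TestCase):
    def setUp(self):
        super(ExampleTestCase, self).setUp()
        # Applies the defaults set in ConfFixture.setUp() and resets CONF
        # again when the test finishes.
        self.useFixture(ConfFixture(cfg.CONF))

    def test_host_default(self):
        self.assertEqual('fake-mini', cfg.CONF.host)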
Sumith1896/sympy
sympy/polys/compatibility.py
96
56525
"""Compatibility interface between dense and sparse polys. """ from __future__ import print_function, division from sympy.polys.densearith import dup_add_term from sympy.polys.densearith import dmp_add_term from sympy.polys.densearith import dup_sub_term from sympy.polys.densearith import dmp_sub_term from sympy.polys.densearith import dup_mul_term from sympy.polys.densearith import dmp_mul_term from sympy.polys.densearith import dup_add_ground from sympy.polys.densearith import dmp_add_ground from sympy.polys.densearith import dup_sub_ground from sympy.polys.densearith import dmp_sub_ground from sympy.polys.densearith import dup_mul_ground from sympy.polys.densearith import dmp_mul_ground from sympy.polys.densearith import dup_quo_ground from sympy.polys.densearith import dmp_quo_ground from sympy.polys.densearith import dup_exquo_ground from sympy.polys.densearith import dmp_exquo_ground from sympy.polys.densearith import dup_lshift from sympy.polys.densearith import dup_rshift from sympy.polys.densearith import dup_abs from sympy.polys.densearith import dmp_abs from sympy.polys.densearith import dup_neg from sympy.polys.densearith import dmp_neg from sympy.polys.densearith import dup_add from sympy.polys.densearith import dmp_add from sympy.polys.densearith import dup_sub from sympy.polys.densearith import dmp_sub from sympy.polys.densearith import dup_add_mul from sympy.polys.densearith import dmp_add_mul from sympy.polys.densearith import dup_sub_mul from sympy.polys.densearith import dmp_sub_mul from sympy.polys.densearith import dup_mul from sympy.polys.densearith import dmp_mul from sympy.polys.densearith import dup_sqr from sympy.polys.densearith import dmp_sqr from sympy.polys.densearith import dup_pow from sympy.polys.densearith import dmp_pow from sympy.polys.densearith import dup_pdiv from sympy.polys.densearith import dup_prem from sympy.polys.densearith import dup_pquo from sympy.polys.densearith import dup_pexquo from sympy.polys.densearith import dmp_pdiv from sympy.polys.densearith import dmp_prem from sympy.polys.densearith import dmp_pquo from sympy.polys.densearith import dmp_pexquo from sympy.polys.densearith import dup_rr_div from sympy.polys.densearith import dmp_rr_div from sympy.polys.densearith import dup_ff_div from sympy.polys.densearith import dmp_ff_div from sympy.polys.densearith import dup_div from sympy.polys.densearith import dup_rem from sympy.polys.densearith import dup_quo from sympy.polys.densearith import dup_exquo from sympy.polys.densearith import dmp_div from sympy.polys.densearith import dmp_rem from sympy.polys.densearith import dmp_quo from sympy.polys.densearith import dmp_exquo from sympy.polys.densearith import dup_max_norm from sympy.polys.densearith import dmp_max_norm from sympy.polys.densearith import dup_l1_norm from sympy.polys.densearith import dmp_l1_norm from sympy.polys.densearith import dup_expand from sympy.polys.densearith import dmp_expand from sympy.polys.densebasic import dup_LC from sympy.polys.densebasic import dmp_LC from sympy.polys.densebasic import dup_TC from sympy.polys.densebasic import dmp_TC from sympy.polys.densebasic import dmp_ground_LC from sympy.polys.densebasic import dmp_ground_TC from sympy.polys.densebasic import dup_degree from sympy.polys.densebasic import dmp_degree from sympy.polys.densebasic import dmp_degree_in from sympy.polys.densebasic import dmp_to_dict from sympy.polys.densetools import dup_integrate from sympy.polys.densetools import dmp_integrate from sympy.polys.densetools import 
dmp_integrate_in from sympy.polys.densetools import dup_diff from sympy.polys.densetools import dmp_diff from sympy.polys.densetools import dmp_diff_in from sympy.polys.densetools import dup_eval from sympy.polys.densetools import dmp_eval from sympy.polys.densetools import dmp_eval_in from sympy.polys.densetools import dmp_eval_tail from sympy.polys.densetools import dmp_diff_eval_in from sympy.polys.densetools import dup_trunc from sympy.polys.densetools import dmp_trunc from sympy.polys.densetools import dmp_ground_trunc from sympy.polys.densetools import dup_monic from sympy.polys.densetools import dmp_ground_monic from sympy.polys.densetools import dup_content from sympy.polys.densetools import dmp_ground_content from sympy.polys.densetools import dup_primitive from sympy.polys.densetools import dmp_ground_primitive from sympy.polys.densetools import dup_extract from sympy.polys.densetools import dmp_ground_extract from sympy.polys.densetools import dup_real_imag from sympy.polys.densetools import dup_mirror from sympy.polys.densetools import dup_scale from sympy.polys.densetools import dup_shift from sympy.polys.densetools import dup_transform from sympy.polys.densetools import dup_compose from sympy.polys.densetools import dmp_compose from sympy.polys.densetools import dup_decompose from sympy.polys.densetools import dmp_lift from sympy.polys.densetools import dup_sign_variations from sympy.polys.densetools import dup_clear_denoms from sympy.polys.densetools import dmp_clear_denoms from sympy.polys.densetools import dup_revert from sympy.polys.euclidtools import dup_half_gcdex from sympy.polys.euclidtools import dmp_half_gcdex from sympy.polys.euclidtools import dup_gcdex from sympy.polys.euclidtools import dmp_gcdex from sympy.polys.euclidtools import dup_invert from sympy.polys.euclidtools import dmp_invert from sympy.polys.euclidtools import dup_euclidean_prs from sympy.polys.euclidtools import dmp_euclidean_prs from sympy.polys.euclidtools import dup_primitive_prs from sympy.polys.euclidtools import dmp_primitive_prs from sympy.polys.euclidtools import dup_inner_subresultants from sympy.polys.euclidtools import dup_subresultants from sympy.polys.euclidtools import dup_prs_resultant from sympy.polys.euclidtools import dup_resultant from sympy.polys.euclidtools import dmp_inner_subresultants from sympy.polys.euclidtools import dmp_subresultants from sympy.polys.euclidtools import dmp_prs_resultant from sympy.polys.euclidtools import dmp_zz_modular_resultant from sympy.polys.euclidtools import dmp_zz_collins_resultant from sympy.polys.euclidtools import dmp_qq_collins_resultant from sympy.polys.euclidtools import dmp_resultant from sympy.polys.euclidtools import dup_discriminant from sympy.polys.euclidtools import dmp_discriminant from sympy.polys.euclidtools import dup_rr_prs_gcd from sympy.polys.euclidtools import dup_ff_prs_gcd from sympy.polys.euclidtools import dmp_rr_prs_gcd from sympy.polys.euclidtools import dmp_ff_prs_gcd from sympy.polys.euclidtools import dup_zz_heu_gcd from sympy.polys.euclidtools import dmp_zz_heu_gcd from sympy.polys.euclidtools import dup_qq_heu_gcd from sympy.polys.euclidtools import dmp_qq_heu_gcd from sympy.polys.euclidtools import dup_inner_gcd from sympy.polys.euclidtools import dmp_inner_gcd from sympy.polys.euclidtools import dup_gcd from sympy.polys.euclidtools import dmp_gcd from sympy.polys.euclidtools import dup_rr_lcm from sympy.polys.euclidtools import dup_ff_lcm from sympy.polys.euclidtools import dup_lcm from sympy.polys.euclidtools 
import dmp_rr_lcm from sympy.polys.euclidtools import dmp_ff_lcm from sympy.polys.euclidtools import dmp_lcm from sympy.polys.euclidtools import dmp_content from sympy.polys.euclidtools import dmp_primitive from sympy.polys.euclidtools import dup_cancel from sympy.polys.euclidtools import dmp_cancel from sympy.polys.factortools import dup_trial_division from sympy.polys.factortools import dmp_trial_division from sympy.polys.factortools import dup_zz_mignotte_bound from sympy.polys.factortools import dmp_zz_mignotte_bound from sympy.polys.factortools import dup_zz_hensel_step from sympy.polys.factortools import dup_zz_hensel_lift from sympy.polys.factortools import dup_zz_zassenhaus from sympy.polys.factortools import dup_zz_irreducible_p from sympy.polys.factortools import dup_cyclotomic_p from sympy.polys.factortools import dup_zz_cyclotomic_poly from sympy.polys.factortools import dup_zz_cyclotomic_factor from sympy.polys.factortools import dup_zz_factor_sqf from sympy.polys.factortools import dup_zz_factor from sympy.polys.factortools import dmp_zz_wang_non_divisors from sympy.polys.factortools import dmp_zz_wang_lead_coeffs from sympy.polys.factortools import dup_zz_diophantine from sympy.polys.factortools import dmp_zz_diophantine from sympy.polys.factortools import dmp_zz_wang_hensel_lifting from sympy.polys.factortools import dmp_zz_wang from sympy.polys.factortools import dmp_zz_factor from sympy.polys.factortools import dup_ext_factor from sympy.polys.factortools import dmp_ext_factor from sympy.polys.factortools import dup_gf_factor from sympy.polys.factortools import dmp_gf_factor from sympy.polys.factortools import dup_factor_list from sympy.polys.factortools import dup_factor_list_include from sympy.polys.factortools import dmp_factor_list from sympy.polys.factortools import dmp_factor_list_include from sympy.polys.factortools import dup_irreducible_p from sympy.polys.factortools import dmp_irreducible_p from sympy.polys.rootisolation import dup_sturm from sympy.polys.rootisolation import dup_root_upper_bound from sympy.polys.rootisolation import dup_root_lower_bound from sympy.polys.rootisolation import dup_step_refine_real_root from sympy.polys.rootisolation import dup_inner_refine_real_root from sympy.polys.rootisolation import dup_outer_refine_real_root from sympy.polys.rootisolation import dup_refine_real_root from sympy.polys.rootisolation import dup_inner_isolate_real_roots from sympy.polys.rootisolation import dup_inner_isolate_positive_roots from sympy.polys.rootisolation import dup_inner_isolate_negative_roots from sympy.polys.rootisolation import dup_isolate_real_roots_sqf from sympy.polys.rootisolation import dup_isolate_real_roots from sympy.polys.rootisolation import dup_isolate_real_roots_list from sympy.polys.rootisolation import dup_count_real_roots from sympy.polys.rootisolation import dup_count_complex_roots from sympy.polys.rootisolation import dup_isolate_complex_roots_sqf from sympy.polys.rootisolation import dup_isolate_all_roots_sqf from sympy.polys.rootisolation import dup_isolate_all_roots from sympy.polys.sqfreetools import ( dup_sqf_p, dmp_sqf_p, dup_sqf_norm, dmp_sqf_norm, dup_gf_sqf_part, dmp_gf_sqf_part, dup_sqf_part, dmp_sqf_part, dup_gf_sqf_list, dmp_gf_sqf_list, dup_sqf_list, dup_sqf_list_include, dmp_sqf_list, dmp_sqf_list_include, dup_gff_list, dmp_gff_list) from sympy.polys.galoistools import ( gf_degree, gf_LC, gf_TC, gf_strip, gf_from_dict, gf_to_dict, gf_from_int_poly, gf_to_int_poly, gf_neg, gf_add_ground, gf_sub_ground, gf_mul_ground, 
gf_quo_ground, gf_add, gf_sub, gf_mul, gf_sqr, gf_add_mul, gf_sub_mul, gf_expand, gf_div, gf_rem, gf_quo, gf_exquo, gf_lshift, gf_rshift, gf_pow, gf_pow_mod, gf_gcd, gf_lcm, gf_cofactors, gf_gcdex, gf_monic, gf_diff, gf_eval, gf_multi_eval, gf_compose, gf_compose_mod, gf_trace_map, gf_random, gf_irreducible, gf_irred_p_ben_or, gf_irred_p_rabin, gf_irreducible_p, gf_sqf_p, gf_sqf_part, gf_Qmatrix, gf_berlekamp, gf_ddf_zassenhaus, gf_edf_zassenhaus, gf_ddf_shoup, gf_edf_shoup, gf_zassenhaus, gf_shoup, gf_factor_sqf, gf_factor) from sympy.utilities import public @public class IPolys(object): symbols = None ngens = None domain = None order = None gens = None def drop(self, gen): pass def clone(self, symbols=None, domain=None, order=None): pass def to_ground(self): pass def ground_new(self, element): pass def domain_new(self, element): pass def from_dict(self, d): pass def wrap(self, element): from sympy.polys.rings import PolyElement if isinstance(element, PolyElement): if element.ring == self: return element else: raise NotImplementedError("domain conversions") else: return self.ground_new(element) def to_dense(self, element): return self.wrap(element).to_dense() def from_dense(self, element): return self.from_dict(dmp_to_dict(element, self.ngens-1, self.domain)) def dup_add_term(self, f, c, i): return self.from_dense(dup_add_term(self.to_dense(f), c, i, self.domain)) def dmp_add_term(self, f, c, i): return self.from_dense(dmp_add_term(self.to_dense(f), self.wrap(c).drop(0).to_dense(), i, self.ngens-1, self.domain)) def dup_sub_term(self, f, c, i): return self.from_dense(dup_sub_term(self.to_dense(f), c, i, self.domain)) def dmp_sub_term(self, f, c, i): return self.from_dense(dmp_sub_term(self.to_dense(f), self.wrap(c).drop(0).to_dense(), i, self.ngens-1, self.domain)) def dup_mul_term(self, f, c, i): return self.from_dense(dup_mul_term(self.to_dense(f), c, i, self.domain)) def dmp_mul_term(self, f, c, i): return self.from_dense(dmp_mul_term(self.to_dense(f), self.wrap(c).drop(0).to_dense(), i, self.ngens-1, self.domain)) def dup_add_ground(self, f, c): return self.from_dense(dup_add_ground(self.to_dense(f), c, self.domain)) def dmp_add_ground(self, f, c): return self.from_dense(dmp_add_ground(self.to_dense(f), c, self.ngens-1, self.domain)) def dup_sub_ground(self, f, c): return self.from_dense(dup_sub_ground(self.to_dense(f), c, self.domain)) def dmp_sub_ground(self, f, c): return self.from_dense(dmp_sub_ground(self.to_dense(f), c, self.ngens-1, self.domain)) def dup_mul_ground(self, f, c): return self.from_dense(dup_mul_ground(self.to_dense(f), c, self.domain)) def dmp_mul_ground(self, f, c): return self.from_dense(dmp_mul_ground(self.to_dense(f), c, self.ngens-1, self.domain)) def dup_quo_ground(self, f, c): return self.from_dense(dup_quo_ground(self.to_dense(f), c, self.domain)) def dmp_quo_ground(self, f, c): return self.from_dense(dmp_quo_ground(self.to_dense(f), c, self.ngens-1, self.domain)) def dup_exquo_ground(self, f, c): return self.from_dense(dup_exquo_ground(self.to_dense(f), c, self.domain)) def dmp_exquo_ground(self, f, c): return self.from_dense(dmp_exquo_ground(self.to_dense(f), c, self.ngens-1, self.domain)) def dup_lshift(self, f, n): return self.from_dense(dup_lshift(self.to_dense(f), n, self.domain)) def dup_rshift(self, f, n): return self.from_dense(dup_rshift(self.to_dense(f), n, self.domain)) def dup_abs(self, f): return self.from_dense(dup_abs(self.to_dense(f), self.domain)) def dmp_abs(self, f): return self.from_dense(dmp_abs(self.to_dense(f), self.ngens-1, 
self.domain)) def dup_neg(self, f): return self.from_dense(dup_neg(self.to_dense(f), self.domain)) def dmp_neg(self, f): return self.from_dense(dmp_neg(self.to_dense(f), self.ngens-1, self.domain)) def dup_add(self, f, g): return self.from_dense(dup_add(self.to_dense(f), self.to_dense(g), self.domain)) def dmp_add(self, f, g): return self.from_dense(dmp_add(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain)) def dup_sub(self, f, g): return self.from_dense(dup_sub(self.to_dense(f), self.to_dense(g), self.domain)) def dmp_sub(self, f, g): return self.from_dense(dmp_sub(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain)) def dup_add_mul(self, f, g, h): return self.from_dense(dup_add_mul(self.to_dense(f), self.to_dense(g), self.to_dense(h), self.domain)) def dmp_add_mul(self, f, g, h): return self.from_dense(dmp_add_mul(self.to_dense(f), self.to_dense(g), self.to_dense(h), self.ngens-1, self.domain)) def dup_sub_mul(self, f, g, h): return self.from_dense(dup_sub_mul(self.to_dense(f), self.to_dense(g), self.to_dense(h), self.domain)) def dmp_sub_mul(self, f, g, h): return self.from_dense(dmp_sub_mul(self.to_dense(f), self.to_dense(g), self.to_dense(h), self.ngens-1, self.domain)) def dup_mul(self, f, g): return self.from_dense(dup_mul(self.to_dense(f), self.to_dense(g), self.domain)) def dmp_mul(self, f, g): return self.from_dense(dmp_mul(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain)) def dup_sqr(self, f): return self.from_dense(dup_sqr(self.to_dense(f), self.domain)) def dmp_sqr(self, f): return self.from_dense(dmp_sqr(self.to_dense(f), self.ngens-1, self.domain)) def dup_pow(self, f, n): return self.from_dense(dup_pow(self.to_dense(f), n, self.domain)) def dmp_pow(self, f, n): return self.from_dense(dmp_pow(self.to_dense(f), n, self.ngens-1, self.domain)) def dup_pdiv(self, f, g): q, r = dup_pdiv(self.to_dense(f), self.to_dense(g), self.domain) return (self.from_dense(q), self.from_dense(r)) def dup_prem(self, f, g): return self.from_dense(dup_prem(self.to_dense(f), self.to_dense(g), self.domain)) def dup_pquo(self, f, g): return self.from_dense(dup_pquo(self.to_dense(f), self.to_dense(g), self.domain)) def dup_pexquo(self, f, g): return self.from_dense(dup_pexquo(self.to_dense(f), self.to_dense(g), self.domain)) def dmp_pdiv(self, f, g): q, r = dmp_pdiv(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain) return (self.from_dense(q), self.from_dense(r)) def dmp_prem(self, f, g): return self.from_dense(dmp_prem(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain)) def dmp_pquo(self, f, g): return self.from_dense(dmp_pquo(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain)) def dmp_pexquo(self, f, g): return self.from_dense(dmp_pexquo(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain)) def dup_rr_div(self, f, g): q, r = dup_rr_div(self.to_dense(f), self.to_dense(g), self.domain) return (self.from_dense(q), self.from_dense(r)) def dmp_rr_div(self, f, g): q, r = dmp_rr_div(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain) return (self.from_dense(q), self.from_dense(r)) def dup_ff_div(self, f, g): q, r = dup_ff_div(self.to_dense(f), self.to_dense(g), self.domain) return (self.from_dense(q), self.from_dense(r)) def dmp_ff_div(self, f, g): q, r = dmp_ff_div(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain) return (self.from_dense(q), self.from_dense(r)) def dup_div(self, f, g): q, r = dup_div(self.to_dense(f), self.to_dense(g), self.domain) return (self.from_dense(q), self.from_dense(r)) def 
dup_rem(self, f, g): return self.from_dense(dup_rem(self.to_dense(f), self.to_dense(g), self.domain)) def dup_quo(self, f, g): return self.from_dense(dup_quo(self.to_dense(f), self.to_dense(g), self.domain)) def dup_exquo(self, f, g): return self.from_dense(dup_exquo(self.to_dense(f), self.to_dense(g), self.domain)) def dmp_div(self, f, g): q, r = dmp_div(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain) return (self.from_dense(q), self.from_dense(r)) def dmp_rem(self, f, g): return self.from_dense(dmp_rem(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain)) def dmp_quo(self, f, g): return self.from_dense(dmp_quo(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain)) def dmp_exquo(self, f, g): return self.from_dense(dmp_exquo(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain)) def dup_max_norm(self, f): return dup_max_norm(self.to_dense(f), self.domain) def dmp_max_norm(self, f): return dmp_max_norm(self.to_dense(f), self.ngens-1, self.domain) def dup_l1_norm(self, f): return dup_l1_norm(self.to_dense(f), self.domain) def dmp_l1_norm(self, f): return dmp_l1_norm(self.to_dense(f), self.ngens-1, self.domain) def dup_expand(self, polys): return self.from_dense(dup_expand(list(map(self.to_dense, polys)), self.domain)) def dmp_expand(self, polys): return self.from_dense(dmp_expand(list(map(self.to_dense, polys)), self.ngens-1, self.domain)) def dup_LC(self, f): return dup_LC(self.to_dense(f), self.domain) def dmp_LC(self, f): LC = dmp_LC(self.to_dense(f), self.domain) if isinstance(LC, list): return self[1:].from_dense(LC) else: return LC def dup_TC(self, f): return dup_TC(self.to_dense(f), self.domain) def dmp_TC(self, f): TC = dmp_TC(self.to_dense(f), self.domain) if isinstance(TC, list): return self[1:].from_dense(TC) else: return TC def dmp_ground_LC(self, f): return dmp_ground_LC(self.to_dense(f), self.ngens-1, self.domain) def dmp_ground_TC(self, f): return dmp_ground_TC(self.to_dense(f), self.ngens-1, self.domain) def dup_degree(self, f): return dup_degree(self.to_dense(f)) def dmp_degree(self, f): return dmp_degree(self.to_dense(f), self.ngens-1) def dmp_degree_in(self, f, j): return dmp_degree_in(self.to_dense(f), j, self.ngens-1) def dup_integrate(self, f, m): return self.from_dense(dup_integrate(self.to_dense(f), m, self.domain)) def dmp_integrate(self, f, m): return self.from_dense(dmp_integrate(self.to_dense(f), m, self.ngens-1, self.domain)) def dup_diff(self, f, m): return self.from_dense(dup_diff(self.to_dense(f), m, self.domain)) def dmp_diff(self, f, m): return self.from_dense(dmp_diff(self.to_dense(f), m, self.ngens-1, self.domain)) def dmp_diff_in(self, f, m, j): return self.from_dense(dmp_diff_in(self.to_dense(f), m, j, self.ngens-1, self.domain)) def dmp_integrate_in(self, f, m, j): return self.from_dense(dmp_integrate_in(self.to_dense(f), m, j, self.ngens-1, self.domain)) def dup_eval(self, f, a): return dup_eval(self.to_dense(f), a, self.domain) def dmp_eval(self, f, a): result = dmp_eval(self.to_dense(f), a, self.ngens-1, self.domain) return self[1:].from_dense(result) def dmp_eval_in(self, f, a, j): result = dmp_eval_in(self.to_dense(f), a, j, self.ngens-1, self.domain) return self.drop(j).from_dense(result) def dmp_diff_eval_in(self, f, m, a, j): result = dmp_diff_eval_in(self.to_dense(f), m, a, j, self.ngens-1, self.domain) return self.drop(j).from_dense(result) def dmp_eval_tail(self, f, A): result = dmp_eval_tail(self.to_dense(f), A, self.ngens-1, self.domain) if isinstance(result, list): return 
self[:-len(A)].from_dense(result) else: return result def dup_trunc(self, f, p): return self.from_dense(dup_trunc(self.to_dense(f), p, self.domain)) def dmp_trunc(self, f, g): return self.from_dense(dmp_trunc(self.to_dense(f), self[1:].to_dense(g), self.ngens-1, self.domain)) def dmp_ground_trunc(self, f, p): return self.from_dense(dmp_ground_trunc(self.to_dense(f), p, self.ngens-1, self.domain)) def dup_monic(self, f): return self.from_dense(dup_monic(self.to_dense(f), self.domain)) def dmp_ground_monic(self, f): return self.from_dense(dmp_ground_monic(self.to_dense(f), self.ngens-1, self.domain)) def dup_extract(self, f, g): c, F, G = dup_extract(self.to_dense(f), self.to_dense(g), self.domain) return (c, self.from_dense(F), self.from_dense(G)) def dmp_ground_extract(self, f, g): c, F, G = dmp_ground_extract(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain) return (c, self.from_dense(F), self.from_dense(G)) def dup_real_imag(self, f): p, q = dup_real_imag(self.wrap(f).drop(1).to_dense(), self.domain) return (self.from_dense(p), self.from_dense(q)) def dup_mirror(self, f): return self.from_dense(dup_mirror(self.to_dense(f), self.domain)) def dup_scale(self, f, a): return self.from_dense(dup_scale(self.to_dense(f), a, self.domain)) def dup_shift(self, f, a): return self.from_dense(dup_shift(self.to_dense(f), a, self.domain)) def dup_transform(self, f, p, q): return self.from_dense(dup_transform(self.to_dense(f), self.to_dense(p), self.to_dense(q), self.domain)) def dup_compose(self, f, g): return self.from_dense(dup_compose(self.to_dense(f), self.to_dense(g), self.domain)) def dmp_compose(self, f, g): return self.from_dense(dmp_compose(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain)) def dup_decompose(self, f): components = dup_decompose(self.to_dense(f), self.domain) return list(map(self.from_dense, components)) def dmp_lift(self, f): result = dmp_lift(self.to_dense(f), self.ngens-1, self.domain) return self.to_ground().from_dense(result) def dup_sign_variations(self, f): return dup_sign_variations(self.to_dense(f), self.domain) def dup_clear_denoms(self, f, convert=False): c, F = dup_clear_denoms(self.to_dense(f), self.domain, convert=convert) if convert: ring = self.clone(domain=self.domain.get_ring()) else: ring = self return (c, ring.from_dense(F)) def dmp_clear_denoms(self, f, convert=False): c, F = dmp_clear_denoms(self.to_dense(f), self.ngens-1, self.domain, convert=convert) if convert: ring = self.clone(domain=self.domain.get_ring()) else: ring = self return (c, ring.from_dense(F)) def dup_revert(self, f, n): return self.from_dense(dup_revert(self.to_dense(f), n, self.domain)) def dup_half_gcdex(self, f, g): s, h = dup_half_gcdex(self.to_dense(f), self.to_dense(g), self.domain) return (self.from_dense(s), self.from_dense(h)) def dmp_half_gcdex(self, f, g): s, h = dmp_half_gcdex(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain) return (self.from_dense(s), self.from_dense(h)) def dup_gcdex(self, f, g): s, t, h = dup_gcdex(self.to_dense(f), self.to_dense(g), self.domain) return (self.from_dense(s), self.from_dense(t), self.from_dense(h)) def dmp_gcdex(self, f, g): s, t, h = dmp_gcdex(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain) return (self.from_dense(s), self.from_dense(t), self.from_dense(h)) def dup_invert(self, f, g): return self.from_dense(dup_invert(self.to_dense(f), self.to_dense(g), self.domain)) def dmp_invert(self, f, g): return self.from_dense(dmp_invert(self.to_dense(f), self.to_dense(g), self.ngens-1, 
self.domain)) def dup_euclidean_prs(self, f, g): prs = dup_euclidean_prs(self.to_dense(f), self.to_dense(g), self.domain) return list(map(self.from_dense, prs)) def dmp_euclidean_prs(self, f, g): prs = dmp_euclidean_prs(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain) return list(map(self.from_dense, prs)) def dup_primitive_prs(self, f, g): prs = dup_primitive_prs(self.to_dense(f), self.to_dense(g), self.domain) return list(map(self.from_dense, prs)) def dmp_primitive_prs(self, f, g): prs = dmp_primitive_prs(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain) return list(map(self.from_dense, prs)) def dup_inner_subresultants(self, f, g): prs, sres = dup_inner_subresultants(self.to_dense(f), self.to_dense(g), self.domain) return (list(map(self.from_dense, prs)), sres) def dmp_inner_subresultants(self, f, g): prs, sres = dmp_inner_subresultants(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain) return (list(map(self.from_dense, prs)), sres) def dup_subresultants(self, f, g): prs = dup_subresultants(self.to_dense(f), self.to_dense(g), self.domain) return list(map(self.from_dense, prs)) def dmp_subresultants(self, f, g): prs = dmp_subresultants(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain) return list(map(self.from_dense, prs)) def dup_prs_resultant(self, f, g): res, prs = dup_prs_resultant(self.to_dense(f), self.to_dense(g), self.domain) return (res, list(map(self.from_dense, prs))) def dmp_prs_resultant(self, f, g): res, prs = dmp_prs_resultant(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain) return (self[1:].from_dense(res), list(map(self.from_dense, prs))) def dmp_zz_modular_resultant(self, f, g, p): res = dmp_zz_modular_resultant(self.to_dense(f), self.to_dense(g), self.domain_new(p), self.ngens-1, self.domain) return self[1:].from_dense(res) def dmp_zz_collins_resultant(self, f, g): res = dmp_zz_collins_resultant(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain) return self[1:].from_dense(res) def dmp_qq_collins_resultant(self, f, g): res = dmp_qq_collins_resultant(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain) return self[1:].from_dense(res) def dup_resultant(self, f, g): #, includePRS=False): return dup_resultant(self.to_dense(f), self.to_dense(g), self.domain) #, includePRS=includePRS) def dmp_resultant(self, f, g): #, includePRS=False): res = dmp_resultant(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain) #, includePRS=includePRS) if isinstance(res, list): return self[1:].from_dense(res) else: return res def dup_discriminant(self, f): return dup_discriminant(self.to_dense(f), self.domain) def dmp_discriminant(self, f): disc = dmp_discriminant(self.to_dense(f), self.ngens-1, self.domain) if isinstance(disc, list): return self[1:].from_dense(disc) else: return disc def dup_rr_prs_gcd(self, f, g): H, F, G = dup_rr_prs_gcd(self.to_dense(f), self.to_dense(g), self.domain) return (self.from_dense(H), self.from_dense(F), self.from_dense(G)) def dup_ff_prs_gcd(self, f, g): H, F, G = dup_ff_prs_gcd(self.to_dense(f), self.to_dense(g), self.domain) return (self.from_dense(H), self.from_dense(F), self.from_dense(G)) def dmp_rr_prs_gcd(self, f, g): H, F, G = dmp_rr_prs_gcd(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain) return (self.from_dense(H), self.from_dense(F), self.from_dense(G)) def dmp_ff_prs_gcd(self, f, g): H, F, G = dmp_ff_prs_gcd(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain) return (self.from_dense(H), self.from_dense(F), self.from_dense(G)) 
def dup_zz_heu_gcd(self, f, g): H, F, G = dup_zz_heu_gcd(self.to_dense(f), self.to_dense(g), self.domain) return (self.from_dense(H), self.from_dense(F), self.from_dense(G)) def dmp_zz_heu_gcd(self, f, g): H, F, G = dmp_zz_heu_gcd(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain) return (self.from_dense(H), self.from_dense(F), self.from_dense(G)) def dup_qq_heu_gcd(self, f, g): H, F, G = dup_qq_heu_gcd(self.to_dense(f), self.to_dense(g), self.domain) return (self.from_dense(H), self.from_dense(F), self.from_dense(G)) def dmp_qq_heu_gcd(self, f, g): H, F, G = dmp_qq_heu_gcd(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain) return (self.from_dense(H), self.from_dense(F), self.from_dense(G)) def dup_inner_gcd(self, f, g): H, F, G = dup_inner_gcd(self.to_dense(f), self.to_dense(g), self.domain) return (self.from_dense(H), self.from_dense(F), self.from_dense(G)) def dmp_inner_gcd(self, f, g): H, F, G = dmp_inner_gcd(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain) return (self.from_dense(H), self.from_dense(F), self.from_dense(G)) def dup_gcd(self, f, g): H = dup_gcd(self.to_dense(f), self.to_dense(g), self.domain) return self.from_dense(H) def dmp_gcd(self, f, g): H = dmp_gcd(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain) return self.from_dense(H) def dup_rr_lcm(self, f, g): H = dup_rr_lcm(self.to_dense(f), self.to_dense(g), self.domain) return self.from_dense(H) def dup_ff_lcm(self, f, g): H = dup_ff_lcm(self.to_dense(f), self.to_dense(g), self.domain) return self.from_dense(H) def dup_lcm(self, f, g): H = dup_lcm(self.to_dense(f), self.to_dense(g), self.domain) return self.from_dense(H) def dmp_rr_lcm(self, f, g): H = dmp_rr_lcm(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain) return self.from_dense(H) def dmp_ff_lcm(self, f, g): H = dmp_ff_lcm(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain) return self.from_dense(H) def dmp_lcm(self, f, g): H = dmp_lcm(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain) return self.from_dense(H) def dup_content(self, f): cont = dup_content(self.to_dense(f), self.domain) return cont def dup_primitive(self, f): cont, prim = dup_primitive(self.to_dense(f), self.domain) return cont, self.from_dense(prim) def dmp_content(self, f): cont = dmp_content(self.to_dense(f), self.ngens-1, self.domain) if isinstance(cont, list): return self[1:].from_dense(cont) else: return cont def dmp_primitive(self, f): cont, prim = dmp_primitive(self.to_dense(f), self.ngens-1, self.domain) if isinstance(cont, list): return (self[1:].from_dense(cont), self.from_dense(prim)) else: return (cont, self.from_dense(prim)) def dmp_ground_content(self, f): cont = dmp_ground_content(self.to_dense(f), self.ngens-1, self.domain) return cont def dmp_ground_primitive(self, f): cont, prim = dmp_ground_primitive(self.to_dense(f), self.ngens-1, self.domain) return (cont, self.from_dense(prim)) def dup_cancel(self, f, g, include=True): result = dup_cancel(self.to_dense(f), self.to_dense(g), self.domain, include=include) if not include: cf, cg, F, G = result return (cf, cg, self.from_dense(F), self.from_dense(G)) else: F, G = result return (self.from_dense(F), self.from_dense(G)) def dmp_cancel(self, f, g, include=True): result = dmp_cancel(self.to_dense(f), self.to_dense(g), self.ngens-1, self.domain, include=include) if not include: cf, cg, F, G = result return (cf, cg, self.from_dense(F), self.from_dense(G)) else: F, G = result return (self.from_dense(F), self.from_dense(G)) def dup_trial_division(self, 
f, factors): factors = dup_trial_division(self.to_dense(f), list(map(self.to_dense, factors)), self.domain) return [ (self.from_dense(g), k) for g, k in factors ] def dmp_trial_division(self, f, factors): factors = dmp_trial_division(self.to_dense(f), list(map(self.to_dense, factors)), self.ngens-1, self.domain) return [ (self.from_dense(g), k) for g, k in factors ] def dup_zz_mignotte_bound(self, f): return dup_zz_mignotte_bound(self.to_dense(f), self.domain) def dmp_zz_mignotte_bound(self, f): return dmp_zz_mignotte_bound(self.to_dense(f), self.ngens-1, self.domain) def dup_zz_hensel_step(self, m, f, g, h, s, t): D = self.to_dense G, H, S, T = dup_zz_hensel_step(m, D(f), D(g), D(h), D(s), D(t), self.domain) return (self.from_dense(G), self.from_dense(H), self.from_dense(S), self.from_dense(T)) def dup_zz_hensel_lift(self, p, f, f_list, l): D = self.to_dense polys = dup_zz_hensel_lift(p, D(f), list(map(D, f_list)), l, self.domain) return list(map(self.from_dense, polys)) def dup_zz_zassenhaus(self, f): factors = dup_zz_zassenhaus(self.to_dense(f), self.domain) return [ (self.from_dense(g), k) for g, k in factors ] def dup_zz_irreducible_p(self, f): return dup_zz_irreducible_p(self.to_dense(f), self.domain) def dup_cyclotomic_p(self, f, irreducible=False): return dup_cyclotomic_p(self.to_dense(f), self.domain, irreducible=irreducible) def dup_zz_cyclotomic_poly(self, n): F = dup_zz_cyclotomic_poly(n, self.domain) return self.from_dense(F) def dup_zz_cyclotomic_factor(self, f): result = dup_zz_cyclotomic_factor(self.to_dense(f), self.domain) if result is None: return result else: return list(map(self.from_dense, result)) # E: List[ZZ], cs: ZZ, ct: ZZ def dmp_zz_wang_non_divisors(self, E, cs, ct): return dmp_zz_wang_non_divisors(E, cs, ct, self.domain) # f: Poly, T: List[(Poly, int)], ct: ZZ, A: List[ZZ] #def dmp_zz_wang_test_points(f, T, ct, A): # dmp_zz_wang_test_points(self.to_dense(f), T, ct, A, self.ngens-1, self.domain) # f: Poly, T: List[(Poly, int)], cs: ZZ, E: List[ZZ], H: List[Poly], A: List[ZZ] def dmp_zz_wang_lead_coeffs(self, f, T, cs, E, H, A): mv = self[1:] T = [ (mv.to_dense(t), k) for t, k in T ] uv = self[:1] H = list(map(uv.to_dense, H)) f, HH, CC = dmp_zz_wang_lead_coeffs(self.to_dense(f), T, cs, E, H, A, self.ngens-1, self.domain) return self.from_dense(f), list(map(uv.from_dense, HH)), list(map(mv.from_dense, CC)) # f: List[Poly], m: int, p: ZZ def dup_zz_diophantine(self, F, m, p): result = dup_zz_diophantine(list(map(self.to_dense, F)), m, p, self.domain) return list(map(self.from_dense, result)) # f: List[Poly], c: List[Poly], A: List[ZZ], d: int, p: ZZ def dmp_zz_diophantine(self, F, c, A, d, p): result = dmp_zz_diophantine(list(map(self.to_dense, F)), self.to_dense(c), A, d, p, self.ngens-1, self.domain) return list(map(self.from_dense, result)) # f: Poly, H: List[Poly], LC: List[Poly], A: List[ZZ], p: ZZ def dmp_zz_wang_hensel_lifting(self, f, H, LC, A, p): uv = self[:1] mv = self[1:] H = list(map(uv.to_dense, H)) LC = list(map(mv.to_dense, LC)) result = dmp_zz_wang_hensel_lifting(self.to_dense(f), H, LC, A, p, self.ngens-1, self.domain) return list(map(self.from_dense, result)) def dmp_zz_wang(self, f, mod=None, seed=None): factors = dmp_zz_wang(self.to_dense(f), self.ngens-1, self.domain, mod=mod, seed=seed) return [ self.from_dense(g) for g in factors ] def dup_zz_factor_sqf(self, f): coeff, factors = dup_zz_factor_sqf(self.to_dense(f), self.domain) return (coeff, [ self.from_dense(g) for g in factors ]) def dup_zz_factor(self, f): coeff, factors = 
dup_zz_factor(self.to_dense(f), self.domain) return (coeff, [ (self.from_dense(g), k) for g, k in factors ]) def dmp_zz_factor(self, f): coeff, factors = dmp_zz_factor(self.to_dense(f), self.ngens-1, self.domain) return (coeff, [ (self.from_dense(g), k) for g, k in factors ]) def dup_ext_factor(self, f): coeff, factors = dup_ext_factor(self.to_dense(f), self.domain) return (coeff, [ (self.from_dense(g), k) for g, k in factors ]) def dmp_ext_factor(self, f): coeff, factors = dmp_ext_factor(self.to_dense(f), self.ngens-1, self.domain) return (coeff, [ (self.from_dense(g), k) for g, k in factors ]) def dup_gf_factor(self, f): coeff, factors = dup_gf_factor(self.to_dense(f), self.domain) return (coeff, [ (self.from_dense(g), k) for g, k in factors ]) def dmp_gf_factor(self, f): coeff, factors = dmp_gf_factor(self.to_dense(f), self.ngens-1, self.domain) return (coeff, [ (self.from_dense(g), k) for g, k in factors ]) def dup_factor_list(self, f): coeff, factors = dup_factor_list(self.to_dense(f), self.domain) return (coeff, [ (self.from_dense(g), k) for g, k in factors ]) def dup_factor_list_include(self, f): factors = dup_factor_list_include(self.to_dense(f), self.domain) return [ (self.from_dense(g), k) for g, k in factors ] def dmp_factor_list(self, f): coeff, factors = dmp_factor_list(self.to_dense(f), self.ngens-1, self.domain) return (coeff, [ (self.from_dense(g), k) for g, k in factors ]) def dmp_factor_list_include(self, f): factors = dmp_factor_list_include(self.to_dense(f), self.ngens-1, self.domain) return [ (self.from_dense(g), k) for g, k in factors ] def dup_irreducible_p(self, f): return dup_irreducible_p(self.to_dense(f), self.domain) def dmp_irreducible_p(self, f): return dmp_irreducible_p(self.to_dense(f), self.ngens-1, self.domain) def dup_sturm(self, f): seq = dup_sturm(self.to_dense(f), self.domain) return list(map(self.from_dense, seq)) def dup_sqf_p(self, f): return dup_sqf_p(self.to_dense(f), self.domain) def dmp_sqf_p(self, f): return dmp_sqf_p(self.to_dense(f), self.ngens-1, self.domain) def dup_sqf_norm(self, f): s, F, R = dup_sqf_norm(self.to_dense(f), self.domain) return (s, self.from_dense(F), self.to_ground().from_dense(R)) def dmp_sqf_norm(self, f): s, F, R = dmp_sqf_norm(self.to_dense(f), self.ngens-1, self.domain) return (s, self.from_dense(F), self.to_ground().from_dense(R)) def dup_gf_sqf_part(self, f): return self.from_dense(dup_gf_sqf_part(self.to_dense(f), self.domain)) def dmp_gf_sqf_part(self, f): return self.from_dense(dmp_gf_sqf_part(self.to_dense(f), self.domain)) def dup_sqf_part(self, f): return self.from_dense(dup_sqf_part(self.to_dense(f), self.domain)) def dmp_sqf_part(self, f): return self.from_dense(dmp_sqf_part(self.to_dense(f), self.ngens-1, self.domain)) def dup_gf_sqf_list(self, f, all=False): coeff, factors = dup_gf_sqf_list(self.to_dense(f), self.domain, all=all) return (coeff, [ (self.from_dense(g), k) for g, k in factors ]) def dmp_gf_sqf_list(self, f, all=False): coeff, factors = dmp_gf_sqf_list(self.to_dense(f), self.ngens-1, self.domain, all=all) return (coeff, [ (self.from_dense(g), k) for g, k in factors ]) def dup_sqf_list(self, f, all=False): coeff, factors = dup_sqf_list(self.to_dense(f), self.domain, all=all) return (coeff, [ (self.from_dense(g), k) for g, k in factors ]) def dup_sqf_list_include(self, f, all=False): factors = dup_sqf_list_include(self.to_dense(f), self.domain, all=all) return [ (self.from_dense(g), k) for g, k in factors ] def dmp_sqf_list(self, f, all=False): coeff, factors = dmp_sqf_list(self.to_dense(f), 
self.ngens-1, self.domain, all=all) return (coeff, [ (self.from_dense(g), k) for g, k in factors ]) def dmp_sqf_list_include(self, f, all=False): factors = dmp_sqf_list_include(self.to_dense(f), self.ngens-1, self.domain, all=all) return [ (self.from_dense(g), k) for g, k in factors ] def dup_gff_list(self, f): factors = dup_gff_list(self.to_dense(f), self.domain) return [ (self.from_dense(g), k) for g, k in factors ] def dmp_gff_list(self, f): factors = dmp_gff_list(self.to_dense(f), self.ngens-1, self.domain) return [ (self.from_dense(g), k) for g, k in factors ] def dup_root_upper_bound(self, f): return dup_root_upper_bound(self.to_dense(f), self.domain) def dup_root_lower_bound(self, f): return dup_root_lower_bound(self.to_dense(f), self.domain) def dup_step_refine_real_root(self, f, M, fast=False): return dup_step_refine_real_root(self.to_dense(f), M, self.domain, fast=fast) def dup_inner_refine_real_root(self, f, M, eps=None, steps=None, disjoint=None, fast=False, mobius=False): return dup_inner_refine_real_root(self.to_dense(f), M, self.domain, eps=eps, steps=steps, disjoint=disjoint, fast=fast, mobius=mobius) def dup_outer_refine_real_root(self, f, s, t, eps=None, steps=None, disjoint=None, fast=False): return dup_outer_refine_real_root(self.to_dense(f), s, t, self.domain, eps=eps, steps=steps, disjoint=disjoint, fast=fast) def dup_refine_real_root(self, f, s, t, eps=None, steps=None, disjoint=None, fast=False): return dup_refine_real_root(self.to_dense(f), s, t, self.domain, eps=eps, steps=steps, disjoint=disjoint, fast=fast) def dup_inner_isolate_real_roots(self, f, eps=None, fast=False): return dup_inner_isolate_real_roots(self.to_dense(f), self.domain, eps=eps, fast=fast) def dup_inner_isolate_positive_roots(self, f, eps=None, inf=None, sup=None, fast=False, mobius=False): return dup_inner_isolate_positive_roots(self.to_dense(f), self.domain, eps=eps, inf=inf, sup=sup, fast=fast, mobius=mobius) def dup_inner_isolate_negative_roots(self, f, inf=None, sup=None, eps=None, fast=False, mobius=False): return dup_inner_isolate_negative_roots(self.to_dense(f), self.domain, inf=inf, sup=sup, eps=eps, fast=fast, mobius=mobius) def dup_isolate_real_roots_sqf(self, f, eps=None, inf=None, sup=None, fast=False, blackbox=False): return dup_isolate_real_roots_sqf(self.to_dense(f), self.domain, eps=eps, inf=inf, sup=sup, fast=fast, blackbox=blackbox) def dup_isolate_real_roots(self, f, eps=None, inf=None, sup=None, basis=False, fast=False): return dup_isolate_real_roots(self.to_dense(f), self.domain, eps=eps, inf=inf, sup=sup, basis=basis, fast=fast) def dup_isolate_real_roots_list(self, polys, eps=None, inf=None, sup=None, strict=False, basis=False, fast=False): return dup_isolate_real_roots_list(list(map(self.to_dense, polys)), self.domain, eps=eps, inf=inf, sup=sup, strict=strict, basis=basis, fast=fast) def dup_count_real_roots(self, f, inf=None, sup=None): return dup_count_real_roots(self.to_dense(f), self.domain, inf=inf, sup=sup) def dup_count_complex_roots(self, f, inf=None, sup=None, exclude=None): return dup_count_complex_roots(self.to_dense(f), self.domain, inf=inf, sup=sup, exclude=exclude) def dup_isolate_complex_roots_sqf(self, f, eps=None, inf=None, sup=None, blackbox=False): return dup_isolate_complex_roots_sqf(self.to_dense(f), self.domain, eps=eps, inf=inf, sup=sup, blackbox=blackbox) def dup_isolate_all_roots_sqf(self, f, eps=None, inf=None, sup=None, fast=False, blackbox=False): return dup_isolate_all_roots_sqf(self.to_dense(f), self.domain, eps=eps, inf=inf, sup=sup, 
fast=fast, blackbox=blackbox) def dup_isolate_all_roots(self, f, eps=None, inf=None, sup=None, fast=False): return dup_isolate_all_roots(self.to_dense(f), self.domain, eps=eps, inf=inf, sup=sup, fast=fast) def fateman_poly_F_1(self): from sympy.polys.specialpolys import dmp_fateman_poly_F_1 return tuple(map(self.from_dense, dmp_fateman_poly_F_1(self.ngens-1, self.domain))) def fateman_poly_F_2(self): from sympy.polys.specialpolys import dmp_fateman_poly_F_2 return tuple(map(self.from_dense, dmp_fateman_poly_F_2(self.ngens-1, self.domain))) def fateman_poly_F_3(self): from sympy.polys.specialpolys import dmp_fateman_poly_F_3 return tuple(map(self.from_dense, dmp_fateman_poly_F_3(self.ngens-1, self.domain))) def to_gf_dense(self, element): return gf_strip([ self.domain.dom.convert(c, self.domain) for c in self.wrap(element).to_dense() ]) def from_gf_dense(self, element): return self.from_dict(dmp_to_dict(element, self.ngens-1, self.domain.dom)) def gf_degree(self, f): return gf_degree(self.to_gf_dense(f)) def gf_LC(self, f): return gf_LC(self.to_gf_dense(f), self.domain.dom) def gf_TC(self, f): return gf_TC(self.to_gf_dense(f), self.domain.dom) def gf_strip(self, f): return self.from_gf_dense(gf_strip(self.to_gf_dense(f))) def gf_trunc(self, f): return self.from_gf_dense(gf_trunc(self.to_gf_dense(f), self.domain.mod)) def gf_normal(self, f): return self.from_gf_dense(gf_normal(self.to_gf_dense(f), self.domain.mod, self.domain.dom)) def gf_from_dict(self, f): return self.from_gf_dense(gf_from_dict(f, self.domain.mod, self.domain.dom)) def gf_to_dict(self, f, symmetric=True): return gf_to_dict(self.to_gf_dense(f), self.domain.mod, symmetric=symmetric) def gf_from_int_poly(self, f): return self.from_gf_dense(gf_from_int_poly(f, self.domain.mod)) def gf_to_int_poly(self, f, symmetric=True): return gf_to_int_poly(self.to_gf_dense(f), self.domain.mod, symmetric=symmetric) def gf_neg(self, f): return self.from_gf_dense(gf_neg(self.to_gf_dense(f), self.domain.mod, self.domain.dom)) def gf_add_ground(self, f, a): return self.from_gf_dense(gf_add_ground(self.to_gf_dense(f), a, self.domain.mod, self.domain.dom)) def gf_sub_ground(self, f, a): return self.from_gf_dense(gf_sub_ground(self.to_gf_dense(f), a, self.domain.mod, self.domain.dom)) def gf_mul_ground(self, f, a): return self.from_gf_dense(gf_mul_ground(self.to_gf_dense(f), a, self.domain.mod, self.domain.dom)) def gf_quo_ground(self, f, a): return self.from_gf_dense(gf_quo_ground(self.to_gf_dense(f), a, self.domain.mod, self.domain.dom)) def gf_add(self, f, g): return self.from_gf_dense(gf_add(self.to_gf_dense(f), self.to_gf_dense(g), self.domain.mod, self.domain.dom)) def gf_sub(self, f, g): return self.from_gf_dense(gf_sub(self.to_gf_dense(f), self.to_gf_dense(g), self.domain.mod, self.domain.dom)) def gf_mul(self, f, g): return self.from_gf_dense(gf_mul(self.to_gf_dense(f), self.to_gf_dense(g), self.domain.mod, self.domain.dom)) def gf_sqr(self, f): return self.from_gf_dense(gf_sqr(self.to_gf_dense(f), self.domain.mod, self.domain.dom)) def gf_add_mul(self, f, g, h): return self.from_gf_dense(gf_add_mul(self.to_gf_dense(f), self.to_gf_dense(g), self.to_gf_dense(h), self.domain.mod, self.domain.dom)) def gf_sub_mul(self, f, g, h): return self.from_gf_dense(gf_sub_mul(self.to_gf_dense(f), self.to_gf_dense(g), self.to_gf_dense(h), self.domain.mod, self.domain.dom)) def gf_expand(self, F): return self.from_gf_dense(gf_expand(list(map(self.to_gf_dense, F)), self.domain.mod, self.domain.dom)) def gf_div(self, f, g): q, r =
gf_div(self.to_gf_dense(f), self.to_gf_dense(g), self.domain.mod, self.domain.dom) return self.from_gf_dense(q), self.from_gf_dense(r) def gf_rem(self, f, g): return self.from_gf_dense(gf_rem(self.to_gf_dense(f), self.to_gf_dense(g), self.domain.mod, self.domain.dom)) def gf_quo(self, f, g): return self.from_gf_dense(gf_quo(self.to_gf_dense(f), self.to_gf_dense(g), self.domain.mod, self.domain.dom)) def gf_exquo(self, f, g): return self.from_gf_dense(gf_exquo(self.to_gf_dense(f), self.to_gf_dense(g), self.domain.mod, self.domain.dom)) def gf_lshift(self, f, n): return self.from_gf_dense(gf_lshift(self.to_gf_dense(f), n, self.domain.dom)) def gf_rshift(self, f, n): return self.from_gf_dense(gf_rshift(self.to_gf_dense(f), n, self.domain.dom)) def gf_pow(self, f, n): return self.from_gf_dense(gf_pow(self.to_gf_dense(f), n, self.domain.mod, self.domain.dom)) def gf_pow_mod(self, f, n, g): return self.from_gf_dense(gf_pow_mod(self.to_gf_dense(f), n, self.to_gf_dense(g), self.domain.mod, self.domain.dom)) def gf_cofactors(self, f, g): h, cff, cfg = gf_cofactors(self.to_gf_dense(f), self.to_gf_dense(g), self.domain.mod, self.domain.dom) return self.from_gf_dense(h), self.from_gf_dense(cff), self.from_gf_dense(cfg) def gf_gcd(self, f, g): return self.from_gf_dense(gf_gcd(self.to_gf_dense(f), self.to_gf_dense(g), self.domain.mod, self.domain.dom)) def gf_lcm(self, f, g): return self.from_gf_dense(gf_lcm(self.to_gf_dense(f), self.to_gf_dense(g), self.domain.mod, self.domain.dom)) def gf_gcdex(self, f, g): s, t, h = gf_gcdex(self.to_gf_dense(f), self.to_gf_dense(g), self.domain.mod, self.domain.dom) return self.from_gf_dense(s), self.from_gf_dense(t), self.from_gf_dense(h) def gf_monic(self, f): lc, F = gf_monic(self.to_gf_dense(f), self.domain.mod, self.domain.dom) return lc, self.from_gf_dense(F) def gf_diff(self, f): return self.from_gf_dense(gf_diff(self.to_gf_dense(f), self.domain.mod, self.domain.dom)) def gf_eval(self, f, a): return gf_eval(self.to_gf_dense(f), a, self.domain.mod, self.domain.dom) def gf_multi_eval(self, f, A): return gf_multi_eval(self.to_gf_dense(f), A, self.domain.mod, self.domain.dom) def gf_compose(self, f, g): return self.from_gf_dense(gf_compose(self.to_gf_dense(f), self.to_gf_dense(g), self.domain.mod, self.domain.dom)) def gf_compose_mod(self, g, h, f): return self.from_gf_dense(gf_compose_mod(self.to_gf_dense(g), self.to_gf_dense(h), self.to_gf_dense(f), self.domain.mod, self.domain.dom)) def gf_trace_map(self, a, b, c, n, f): a = self.to_gf_dense(a) b = self.to_gf_dense(b) c = self.to_gf_dense(c) f = self.to_gf_dense(f) U, V = gf_trace_map(a, b, c, n, f, self.domain.mod, self.domain.dom) return self.from_gf_dense(U), self.from_gf_dense(V) def gf_random(self, n): return self.from_gf_dense(gf_random(n, self.domain.mod, self.domain.dom)) def gf_irreducible(self, n): return self.from_gf_dense(gf_irreducible(n, self.domain.mod, self.domain.dom)) def gf_irred_p_ben_or(self, f): return gf_irred_p_ben_or(self.to_gf_dense(f), self.domain.mod, self.domain.dom) def gf_irred_p_rabin(self, f): return gf_irred_p_rabin(self.to_gf_dense(f), self.domain.mod, self.domain.dom) def gf_irreducible_p(self, f): return gf_irreducible_p(self.to_gf_dense(f), self.domain.mod, self.domain.dom) def gf_sqf_p(self, f): return gf_sqf_p(self.to_gf_dense(f), self.domain.mod, self.domain.dom) def gf_sqf_part(self, f): return self.from_gf_dense(gf_sqf_part(self.to_gf_dense(f), self.domain.mod, self.domain.dom)) def gf_sqf_list(self, f, all=False): coeff, factors = gf_sqf_list(self.to_gf_dense(f), self.domain.mod, self.domain.dom, all=all) return coeff, [
(self.from_gf_dense(g), k) for g, k in factors ] def gf_Qmatrix(self, f): return gf_Qmatrix(self.to_gf_dense(f), self.domain.mod, self.domain.dom) def gf_berlekamp(self, f): factors = gf_berlekamp(self.to_gf_dense(f), self.domain.mod, self.domain.dom) return [ self.from_gf_dense(g) for g in factors ] def gf_ddf_zassenhaus(self, f): factors = gf_ddf_zassenhaus(self.to_gf_dense(f), self.domain.mod, self.domain.dom) return [ (self.from_gf_dense(g), k) for g, k in factors ] def gf_edf_zassenhaus(self, f, n): factors = gf_edf_zassenhaus(self.to_gf_dense(f), n, self.domain.mod, self.domain.dom) return [ self.from_gf_dense(g) for g in factors ] def gf_ddf_shoup(self, f): factors = gf_ddf_shoup(self.to_gf_dense(f), self.domain.mod, self.domain.dom) return [ (self.from_gf_dense(g), k) for g, k in factors ] def gf_edf_shoup(self, f, n): factors = gf_edf_shoup(self.to_gf_dense(f), n, self.domain.mod, self.domain.dom) return [ self.from_gf_dense(g) for g in factors ] def gf_zassenhaus(self, f): factors = gf_zassenhaus(self.to_gf_dense(f), self.domain.mod, self.domain.dom) return [ self.from_gf_dense(g) for g in factors ] def gf_shoup(self, f): factors = gf_shoup(self.to_gf_dense(f), self.domain.mod, self.domain.dom) return [ self.from_gf_dense(g) for g in factors ] def gf_factor_sqf(self, f, method=None): coeff, factors = gf_factor_sqf(self.to_gf_dense(f), self.domain.mod, self.domain.dom, method=method) return coeff, [ self.from_gf_dense(g) for g in factors ] def gf_factor(self, f): coeff, factors = gf_factor(self.to_gf_dense(f), self.domain.mod, self.domain.dom) return coeff, [ (self.from_gf_dense(g), k) for g, k in factors ]
bsd-3-clause
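All of the wrappers above follow one round-trip: convert the sparse ring element to a dense coefficient list with to_dense, call the corresponding low-level dense routine, and lift the result back with from_dense. A minimal sketch of that dense convention, calling one such routine directly (this assumes a SymPy version that exposes dup_zz_factor in sympy.polys.factortools; the polynomial is illustrative):

from sympy.polys.factortools import dup_zz_factor
from sympy.polys.domains import ZZ

# x**4 - 1 as a dense coefficient list, highest degree first --
# the representation every dup_* routine above operates on.
f = [ZZ(1), ZZ(0), ZZ(0), ZZ(0), ZZ(-1)]

coeff, factors = dup_zz_factor(f, ZZ)
# coeff is the integer content; factors pairs each dense factor with
# its multiplicity, e.g. ([1, -1], 1), ([1, 1], 1), ([1, 0, 1], 1)
# (factor order may vary between versions).
print(coeff, factors)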
e-sensing/wtss.py
src/setup.py
1
1272
# # Copyright (C) 2014 National Institute For Space Research (INPE) - Brazil. # # This file is part of Python Client API for Web Time Series Service. # # Web Time Series Service for Python is free software: you can # redistribute it and/or modify it under the terms of the # GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, # or (at your option) any later version. # # Web Time Series Service for Python is distributed in the hope that # it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Web Time Series Service for Python. See LICENSE. If not, write to # e-sensing team at <esensing-team@dpi.inpe.br>. # from setuptools import setup setup(name='wtss', version='0.5.0', description='Python Client API for Web Time Series Service', url='https://github.com/e-sensing/wtss.py', author='Gilberto Ribeiro de Queiroz', author_email='gribeiro@dpi.inpe.br', license='LGPL3', packages=['wtss'], zip_safe=False)
lgpl-3.0
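The setup.py above carries only packaging metadata. After installing from a source checkout (pip install .), the declared name and version can be read back; this is a generic standard-library sketch, not part of the wtss codebase, and assumes Python 3.8+ for importlib.metadata:

from importlib.metadata import metadata, version

# Sanity-check the installed distribution against the setup() call above.
print(version("wtss"))              # expected: 0.5.0
print(metadata("wtss")["License"])  # expected: LGPL3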
kaixinjxq/crosswalk-test-suite
cordova/cordova-sampleapp-android-tests/sampleapp/Eh_build.py
18
1913
#!/usr/bin/env python # # Copyright (c) 2015 Intel Corporation. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of works must retain the original copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the original copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of Intel Corporation nor the names of its contributors # may be used to endorse or promote products derived from this work without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY # OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: # Zhu, Yongyong <yongyongx.zhu@intel.com> import unittest import os import commands import glob import comm class TestEhAppBuild(unittest.TestCase): def test_build(self): comm.setUp() app_name = "Eh" sample_src_pref = "/tmp/crosswalk-demos/workshop-cca-eh" comm.buildGoogleApp(app_name, sample_src_pref, self) if __name__ == '__main__': unittest.main()
bsd-3-clause
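The test above depends on suite-local helpers (comm.setUp, comm.buildGoogleApp) and an Android build environment, so it is normally driven through unittest.main(). A single named test like this one can also be loaded and run programmatically; a minimal sketch using only the standard library, with the dotted name taken from the file above and assuming Eh_build.py is importable:

import unittest

# Load exactly one test case by its dotted name and run it verbosely.
suite = unittest.defaultTestLoader.loadTestsFromName(
    "Eh_build.TestEhAppBuild.test_build"
)
unittest.TextTestRunner(verbosity=2).run(suite)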
adrianschroeter/kiwi
test/unit/tasks_system_prepare_test.py
1
8707
import sys import mock import os from mock import patch, call import kiwi from .test_helper import argv_kiwi_tests from kiwi.tasks.system_prepare import SystemPrepareTask class TestSystemPrepareTask(object): def setup(self): sys.argv = [ sys.argv[0], '--profile', 'vmxFlavour', 'system', 'prepare', '--description', '../data/description', '--root', '../data/root-dir' ] self.abs_root_dir = os.path.abspath('../data/root-dir') kiwi.tasks.system_prepare.Privileges = mock.Mock() self.runtime_checker = mock.Mock() kiwi.tasks.base.RuntimeChecker = mock.Mock( return_value=self.runtime_checker ) self.runtime_config = mock.Mock() kiwi.tasks.base.RuntimeConfig = mock.Mock( return_value=self.runtime_config ) self.system_prepare = mock.Mock() kiwi.tasks.system_prepare.SystemPrepare = mock.Mock( return_value=self.system_prepare ) self.manager = mock.Mock() self.system_prepare.setup_repositories = mock.Mock( return_value=self.manager ) self.setup = mock.Mock() kiwi.tasks.system_prepare.SystemSetup = mock.Mock( return_value=self.setup ) self.profile = mock.Mock() self.profile.dot_profile = dict() kiwi.tasks.system_prepare.Profile = mock.Mock( return_value=self.profile ) kiwi.tasks.system_prepare.Help = mock.Mock( return_value=mock.Mock() ) self.task = SystemPrepareTask() def teardown(self): sys.argv = argv_kiwi_tests def _init_command_args(self): self.task.command_args = {} self.task.command_args['help'] = False self.task.command_args['prepare'] = False self.task.command_args['--description'] = '../data/description' self.task.command_args['--root'] = '../data/root-dir' self.task.command_args['--allow-existing-root'] = False self.task.command_args['--set-repo'] = None self.task.command_args['--add-repo'] = [] self.task.command_args['--add-package'] = [] self.task.command_args['--delete-package'] = [] self.task.command_args['--ignore-repos'] = False self.task.command_args['--ignore-repos-used-for-build'] = False self.task.command_args['--clear-cache'] = False self.task.command_args['--set-container-derived-from'] = None self.task.command_args['--set-container-tag'] = None self.task.command_args['--signing-key'] = None def test_process_system_prepare(self): self._init_command_args() self.task.command_args['prepare'] = True self.task.command_args['--clear-cache'] = True self.task.process() self.runtime_checker.check_boot_description_exists.assert_called_once_with() self.runtime_checker.check_consistent_kernel_in_boot_and_system_image.assert_called_once_with() self.runtime_checker.check_docker_tool_chain_installed.assert_called_once_with() self.runtime_checker.check_volume_setup_has_no_root_definition.assert_called_once_with() self.runtime_checker.check_xen_uniquely_setup_as_server_or_guest.assert_called_once_with() self.runtime_checker.check_target_directory_not_in_shared_cache.assert_called_once_with( self.abs_root_dir ) self.runtime_checker.check_mediacheck_only_for_x86_arch.assert_called_once_with() self.runtime_checker.check_dracut_module_for_live_iso_in_package_list.assert_called_once_with() self.runtime_checker.check_repositories_configured.assert_called_once_with() self.runtime_checker.check_dracut_module_for_disk_overlay_in_package_list.assert_called_once_with() self.runtime_checker.check_dracut_module_for_disk_oem_in_package_list.assert_called_once_with() self.runtime_checker.check_dracut_module_for_oem_install_in_package_list.assert_called_once_with() self.runtime_checker.check_efi_mode_for_disk_overlay_correctly_setup.assert_called_once_with() 
self.system_prepare.setup_repositories.assert_called_once_with(True, None) self.system_prepare.install_bootstrap.assert_called_once_with( self.manager ) self.system_prepare.install_system.assert_called_once_with( self.manager ) self.setup.import_shell_environment.assert_called_once_with( self.profile ) self.setup.import_description.assert_called_once_with() self.setup.import_overlay_files.assert_called_once_with() self.setup.import_repositories_marked_as_imageinclude.assert_called_once_with() self.setup.call_config_script.assert_called_once_with() self.setup.import_image_identifier.assert_called_once_with() self.setup.setup_groups.assert_called_once_with() self.setup.setup_users.assert_called_once_with() self.setup.setup_keyboard_map.assert_called_once_with() self.setup.setup_locale.assert_called_once_with() self.setup.setup_plymouth_splash.assert_called_once_with() self.setup.setup_timezone.assert_called_once_with() self.system_prepare.pinch_system.assert_has_calls( [call(force=False), call(force=True)] ) def test_process_system_prepare_add_package(self): self._init_command_args() self.task.command_args['--add-package'] = ['vim'] self.task.process() self.system_prepare.setup_repositories.assert_called_once_with(False, None) self.system_prepare.install_packages.assert_called_once_with( self.manager, ['vim'] ) def test_process_system_prepare_delete_package(self): self._init_command_args() self.task.command_args['--delete-package'] = ['vim'] self.task.process() self.system_prepare.setup_repositories.assert_called_once_with(False, None) self.system_prepare.delete_packages.assert_called_once_with( self.manager, ['vim'] ) @patch('kiwi.xml_state.XMLState.set_container_config_tag') def test_process_system_prepare_set_container_tag( self, mock_set_container_tag ): self._init_command_args() self.task.command_args['--set-container-tag'] = 'new_tag' self.task.process() mock_set_container_tag.assert_called_once_with( 'new_tag' ) @patch('kiwi.xml_state.XMLState.set_derived_from_image_uri') def test_process_system_prepare_set_derived_from_uri( self, mock_set_derived_from_uri ): self._init_command_args() self.task.command_args['--set-container-derived-from'] = 'file:///new' self.task.process() mock_set_derived_from_uri.assert_called_once_with( 'file:///new' ) @patch('kiwi.xml_state.XMLState.set_repository') def test_process_system_prepare_set_repo(self, mock_state): self._init_command_args() self.task.command_args['--set-repo'] = 'http://example.com,yast2,alias' self.task.process() mock_state.assert_called_once_with( 'http://example.com', 'yast2', 'alias', None, None, None ) @patch('kiwi.xml_state.XMLState.add_repository') def test_process_system_prepare_add_repo(self, mock_state): self._init_command_args() self.task.command_args['--add-repo'] = [ 'http://example.com,yast2,alias,99,true' ] self.task.process() mock_state.assert_called_once_with( 'http://example.com', 'yast2', 'alias', '99', True, None ) def test_process_system_prepare_help(self): self._init_command_args() self.task.command_args['help'] = True self.task.command_args['prepare'] = True self.task.process() self.task.manual.show.assert_called_once_with( 'kiwi::system::prepare' ) @patch('kiwi.xml_state.XMLState.delete_repository_sections') def test_process_system_prepare_delete_repos(self, mock_delete_repos): self._init_command_args() self.task.command_args['--ignore-repos'] = True self.task.process() mock_delete_repos.assert_called_once_with() @patch('kiwi.xml_state.XMLState.delete_repository_sections_used_for_build') def 
test_process_system_prepare_delete_repos_used_for_build( self, mock_delete_repos ): self._init_command_args() self.task.command_args['--ignore-repos-used-for-build'] = True self.task.process() mock_delete_repos.assert_called_once_with()
gpl-3.0
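The test class above works almost entirely by swapping collaborators for mock.Mock() objects before the task is constructed, then asserting on the calls those mocks recorded. A standalone sketch of that pattern with illustrative names (not kiwi's own API):

from unittest import mock  # the kiwi tests import the external `mock` package

runtime_checker = mock.Mock()

def process(checker):
    # Stand-in for the code under test.
    checker.check_repositories_configured()

process(runtime_checker)
# The mock recorded the call, so the expectation can be asserted afterwards.
runtime_checker.check_repositories_configured.assert_called_once_with()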
rockstor/rockstor-core
src/rockstor/smart_manager/views/docker_service.py
2
6894
""" Copyright (c) 2012-2020 RockStor, Inc. <http://rockstor.com> This file is part of RockStor. RockStor is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. RockStor is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import json import logging import re import shutil import distro from django.conf import settings from django.db import transaction from rest_framework.response import Response from base_service import BaseServiceDetailView from fs.btrfs import mount_share from smart_manager.models import Service from storageadmin.models import Share from storageadmin.util import handle_exception from system.services import systemctl logger = logging.getLogger(__name__) DOCKERD = "/usr/bin/dockerd" # Distro's for which we have known working conf/docker-distroid.service files. KNOWN_DISTRO_IDS = ["rockstor", "opensuse-leap", "opensuse-tumbleweed"] class DockerServiceView(BaseServiceDetailView): name = "docker" def _validate_root(self, request, root): try: return Share.objects.get(name=root) except Exception as e: logger.exception(e) e_msg = "Share name ({}) does not exist.".format(root) handle_exception(Exception(e_msg), request) @transaction.atomic def post(self, request, command): service = Service.objects.get(name=self.name) if command == "config": config = request.data.get("config", None) root_share = config["root_share"] self._validate_root(request, root_share) self._save_config(service, config) elif command == "start": try: config = self._get_config(service) except Exception as e: logger.exception(e) e_msg = ( "Cannot start without configuration. " "Please configure (System->Services) and try again." ) handle_exception(Exception(e_msg), request) share = self._validate_root(request, config["root_share"]) mnt_pt = "{}{}".format(settings.MNT_PT, share.name) if not share.is_mounted: mount_share(share, mnt_pt) distro_id = distro.id() # for Leap 15 <--> Tumbleweed moves. if distro_id not in KNOWN_DISTRO_IDS: distro_id = "generic" # Write a custom daemon.json file (openSUSE only) conf_file = "{}/docker-daemon.json".format(settings.CONFROOT) if re.match("opensuse", distro_id) is not None: # Write them to file self._write_docker_daemon_conf(conf_file, mnt_pt, request) # Then write the docker.service file try: self._write_docker_service(distro_id, mnt_pt, conf_file) except Exception as e: logger.exception(e) e_msg = "An error occurred while writing the docker.service file" handle_exception(Exception(e_msg), request) if distro_id == "rockstor": socket_file = "{}/docker.socket".format(settings.CONFROOT) shutil.copy(socket_file, "/etc/systemd/system/docker.socket") systemctl(self.name, "enable") systemctl(self.name, "start") elif command == "stop": systemctl(self.name, "stop") systemctl(self.name, "disable") return Response() def _write_docker_daemon_conf(self, outf, data_root, request): """ Takes the default daemon.json from package and replace or add options with Rockstor's customizations. :param outf: string. 
path to output json conf file :param data_root: path to rockons-root :param request: """ inf = "/etc/docker/daemon.json" try: with open(inf, "r") as f: dconf = json.load(f) except IOError as e: # If default conf cannot be loaded, create a new one: dconf = {} # define options dconf["data-root"] = data_root dconf["storage-driver"] = "btrfs" dconf["storage-opts"] = ["btrfs.min_space=1G"] dconf["log-driver"] = "journald" dconf["log-opts"] = {"tag": "{{.ImageName}}/{{.Name}}"} try: with open(outf, "w") as f: json.dump(dconf, f, indent=2, sort_keys=True) f.write("\n") except IOError as e: logger.exception(e) e_msg = "The dockerd configuration file couldn't be written to disk at {}.".format( outf ) handle_exception(IOError(e_msg), request) def _write_docker_service(self, distro_id, mnt_pt, conf_file): docker_wrapper = "{}bin/docker-wrapper".format(settings.ROOT_DIR) # If openSUSE, source conf file from docker package itself if re.match("opensuse", distro_id) is not None: inf = "/usr/lib/systemd/system/docker.service" else: inf = "{}/docker-{}.service".format(settings.CONFROOT, distro_id) outf = "/etc/systemd/system/docker.service" with open(inf) as ino, open(outf, "w") as outo: for l in ino.readlines(): if re.match("ExecStart=", l) is not None: if re.match("opensuse", distro_id) is not None: # point to config file written by _write_docker_daemon_conf() outo.write("{} --config-file {}\n".format(l.strip(), conf_file)) else: outo.write( "{} {}\n".format( l.strip().replace(DOCKERD, docker_wrapper, 1), mnt_pt ) ) elif re.match("Type=notify", l) is not None: # Our docker wrapper needs NotifyAccess=all: avoids # "Got notification message from PID ####1, but # reception only permitted for main PID ####2" outo.write(l) outo.write("NotifyAccess=all\n") elif re.match("After=", l) is not None: outo.write( "{} {}\n".format(l.strip(), "rockstor-bootstrap.service") ) else: outo.write(l)
gpl-3.0
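The heart of _write_docker_daemon_conf() above is a read-merge-write over JSON: load the packaged defaults if they exist, overlay Rockstor's options, and serialize the result. A standalone sketch of just that step, with illustrative paths and mount point:

import json

DEFAULTS = "/etc/docker/daemon.json"  # packaged defaults (may be absent)
OUT = "/tmp/docker-daemon.json"       # illustrative output path
DATA_ROOT = "/mnt2/rockons-root"      # illustrative rock-ons mount point

try:
    with open(DEFAULTS) as f:
        conf = json.load(f)
except IOError:
    conf = {}  # no packaged defaults: start from an empty config

# Overlay the Rockstor-specific options, mirroring the view above.
conf["data-root"] = DATA_ROOT
conf["storage-driver"] = "btrfs"
conf["storage-opts"] = ["btrfs.min_space=1G"]
conf["log-driver"] = "journald"

with open(OUT, "w") as f:
    json.dump(conf, f, indent=2, sort_keys=True)
    f.write("\n")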
hwu25/AppPkg
Applications/Python/Python-2.7.2/Lib/rfc822.py
58
34306
"""RFC 2822 message manipulation. Note: This is only a very rough sketch of a full RFC-822 parser; in particular the tokenizing of addresses does not adhere to all the quoting rules. Note: RFC 2822 is a long awaited update to RFC 822. This module should conform to RFC 2822, and is thus mis-named (it's not worth renaming it). Some effort at RFC 2822 updates have been made, but a thorough audit has not been performed. Consider any RFC 2822 non-conformance to be a bug. RFC 2822: http://www.faqs.org/rfcs/rfc2822.html RFC 822 : http://www.faqs.org/rfcs/rfc822.html (obsolete) Directions for use: To create a Message object: first open a file, e.g.: fp = open(file, 'r') You can use any other legal way of getting an open file object, e.g. use sys.stdin or call os.popen(). Then pass the open file object to the Message() constructor: m = Message(fp) This class can work with any input object that supports a readline method. If the input object has seek and tell capability, the rewindbody method will work; also illegal lines will be pushed back onto the input stream. If the input object lacks seek but has an `unread' method that can push back a line of input, Message will use that to push back illegal lines. Thus this class can be used to parse messages coming from a buffered stream. The optional `seekable' argument is provided as a workaround for certain stdio libraries in which tell() discards buffered data before discovering that the lseek() system call doesn't work. For maximum portability, you should set the seekable argument to zero to prevent that initial \code{tell} when passing in an unseekable object such as a a file object created from a socket object. If it is 1 on entry -- which it is by default -- the tell() method of the open file object is called once; if this raises an exception, seekable is reset to 0. For other nonzero values of seekable, this test is not made. To get the text of a particular header there are several methods: str = m.getheader(name) str = m.getrawheader(name) where name is the name of the header, e.g. 'Subject'. The difference is that getheader() strips the leading and trailing whitespace, while getrawheader() doesn't. Both functions retain embedded whitespace (including newlines) exactly as they are specified in the header, and leave the case of the text unchanged. For addresses and address lists there are functions realname, mailaddress = m.getaddr(name) list = m.getaddrlist(name) where the latter returns a list of (realname, mailaddr) tuples. There is also a method time = m.getdate(name) which parses a Date-like field and returns a time-compatible tuple, i.e. a tuple such as returned by time.localtime() or accepted by time.mktime(). See the class definition for lower level access methods. There are also some utility functions here. """ # Cleanup and extensions by Eric S. 
Raymond <esr@thyrsus.com> import time from warnings import warnpy3k warnpy3k("in 3.x, rfc822 has been removed in favor of the email package", stacklevel=2) __all__ = ["Message","AddressList","parsedate","parsedate_tz","mktime_tz"] _blanklines = ('\r\n', '\n') # Optimization for islast() class Message: """Represents a single RFC 2822-compliant message.""" def __init__(self, fp, seekable = 1): """Initialize the class instance and read the headers.""" if seekable == 1: # Exercise tell() to make sure it works # (and then assume seek() works, too) try: fp.tell() except (AttributeError, IOError): seekable = 0 self.fp = fp self.seekable = seekable self.startofheaders = None self.startofbody = None # if self.seekable: try: self.startofheaders = self.fp.tell() except IOError: self.seekable = 0 # self.readheaders() # if self.seekable: try: self.startofbody = self.fp.tell() except IOError: self.seekable = 0 def rewindbody(self): """Rewind the file to the start of the body (if seekable).""" if not self.seekable: raise IOError, "unseekable file" self.fp.seek(self.startofbody) def readheaders(self): """Read header lines. Read header lines up to the entirely blank line that terminates them. The (normally blank) line that ends the headers is skipped, but not included in the returned list. If a non-header line ends the headers, (which is an error), an attempt is made to backspace over it; it is never included in the returned list. The variable self.status is set to the empty string if all went well, otherwise it is an error message. The variable self.headers is a completely uninterpreted list of lines contained in the header (so printing them will reproduce the header exactly as it appears in the file). """ self.dict = {} self.unixfrom = '' self.headers = lst = [] self.status = '' headerseen = "" firstline = 1 startofline = unread = tell = None if hasattr(self.fp, 'unread'): unread = self.fp.unread elif self.seekable: tell = self.fp.tell while 1: if tell: try: startofline = tell() except IOError: startofline = tell = None self.seekable = 0 line = self.fp.readline() if not line: self.status = 'EOF in headers' break # Skip unix From name time lines if firstline and line.startswith('From '): self.unixfrom = self.unixfrom + line continue firstline = 0 if headerseen and line[0] in ' \t': # It's a continuation line. lst.append(line) x = (self.dict[headerseen] + "\n " + line.strip()) self.dict[headerseen] = x.strip() continue elif self.iscomment(line): # It's a comment. Ignore it. continue elif self.islast(line): # Note! No pushback here! The delimiter line gets eaten. break headerseen = self.isheader(line) if headerseen: # It's a legal header line, save it. lst.append(line) self.dict[headerseen] = line[len(headerseen)+1:].strip() continue else: # It's not a header line; throw it back and stop here. if not self.dict: self.status = 'No headers' else: self.status = 'Non-header line where header expected' # Try to undo the read. if unread: unread(line) elif tell: self.fp.seek(startofline) else: self.status = self.status + '; bad seek' break def isheader(self, line): """Determine whether a given line is a legal header. This method should return the header name, suitably canonicalized. You may override this method in order to use Message parsing on tagged data in RFC 2822-like formats with special header formats. """ i = line.find(':') if i > 0: return line[:i].lower() return None def islast(self, line): """Determine whether a line is a legal end of RFC 2822 headers. 
You may override this method if your application wants to bend the rules, e.g. to strip trailing whitespace, or to recognize MH template separators ('--------'). For convenience (e.g. for code reading from sockets) a line consisting of \r\n also matches. """ return line in _blanklines def iscomment(self, line): """Determine whether a line should be skipped entirely. You may override this method in order to use Message parsing on tagged data in RFC 2822-like formats that support embedded comments or free-text data. """ return False def getallmatchingheaders(self, name): """Find all header lines matching a given header name. Look through the list of headers and find all lines matching a given header name (and their continuation lines). A list of the lines is returned, without interpretation. If the header does not occur, an empty list is returned. If the header occurs multiple times, all occurrences are returned. Case is not important in the header name. """ name = name.lower() + ':' n = len(name) lst = [] hit = 0 for line in self.headers: if line[:n].lower() == name: hit = 1 elif not line[:1].isspace(): hit = 0 if hit: lst.append(line) return lst def getfirstmatchingheader(self, name): """Get the first header line matching name. This is similar to getallmatchingheaders, but it returns only the first matching header (and its continuation lines). """ name = name.lower() + ':' n = len(name) lst = [] hit = 0 for line in self.headers: if hit: if not line[:1].isspace(): break elif line[:n].lower() == name: hit = 1 if hit: lst.append(line) return lst def getrawheader(self, name): """A higher-level interface to getfirstmatchingheader(). Return a string containing the literal text of the header but with the keyword stripped. All leading, trailing and embedded whitespace is kept in the string, however. Return None if the header does not occur. """ lst = self.getfirstmatchingheader(name) if not lst: return None lst[0] = lst[0][len(name) + 1:] return ''.join(lst) def getheader(self, name, default=None): """Get the header value for a name. This is the normal interface: it returns a stripped version of the header value for a given header name, or None if it doesn't exist. This uses the dictionary version which finds the *last* such header. """ return self.dict.get(name.lower(), default) get = getheader def getheaders(self, name): """Get all values for a header. This returns a list of values for headers given more than once; each value in the result list is stripped in the same way as the result of getheader(). If the header is not given, return an empty list. """ result = [] current = '' have_header = 0 for s in self.getallmatchingheaders(name): if s[0].isspace(): if current: current = "%s\n %s" % (current, s.strip()) else: current = s.strip() else: if have_header: result.append(current) current = s[s.find(":") + 1:].strip() have_header = 1 if have_header: result.append(current) return result def getaddr(self, name): """Get a single address from a header, as a tuple. An example return value: ('Guido van Rossum', 'guido@cwi.nl') """ # New, by Ben Escoto alist = self.getaddrlist(name) if alist: return alist[0] else: return (None, None) def getaddrlist(self, name): """Get a list of addresses from a header. Retrieves a list of addresses from a header, where each address is a tuple as returned by getaddr(). Scans all named headers, so it works properly with multiple To: or Cc: headers for example. 
""" raw = [] for h in self.getallmatchingheaders(name): if h[0] in ' \t': raw.append(h) else: if raw: raw.append(', ') i = h.find(':') if i > 0: addr = h[i+1:] raw.append(addr) alladdrs = ''.join(raw) a = AddressList(alladdrs) return a.addresslist def getdate(self, name): """Retrieve a date field from a header. Retrieves a date field from the named header, returning a tuple compatible with time.mktime(). """ try: data = self[name] except KeyError: return None return parsedate(data) def getdate_tz(self, name): """Retrieve a date field from a header as a 10-tuple. The first 9 elements make up a tuple compatible with time.mktime(), and the 10th is the offset of the poster's time zone from GMT/UTC. """ try: data = self[name] except KeyError: return None return parsedate_tz(data) # Access as a dictionary (only finds *last* header of each type): def __len__(self): """Get the number of headers in a message.""" return len(self.dict) def __getitem__(self, name): """Get a specific header, as from a dictionary.""" return self.dict[name.lower()] def __setitem__(self, name, value): """Set the value of a header. Note: This is not a perfect inversion of __getitem__, because any changed headers get stuck at the end of the raw-headers list rather than where the altered header was. """ del self[name] # Won't fail if it doesn't exist self.dict[name.lower()] = value text = name + ": " + value for line in text.split("\n"): self.headers.append(line + "\n") def __delitem__(self, name): """Delete all occurrences of a specific header, if it is present.""" name = name.lower() if not name in self.dict: return del self.dict[name] name = name + ':' n = len(name) lst = [] hit = 0 for i in range(len(self.headers)): line = self.headers[i] if line[:n].lower() == name: hit = 1 elif not line[:1].isspace(): hit = 0 if hit: lst.append(i) for i in reversed(lst): del self.headers[i] def setdefault(self, name, default=""): lowername = name.lower() if lowername in self.dict: return self.dict[lowername] else: text = name + ": " + default for line in text.split("\n"): self.headers.append(line + "\n") self.dict[lowername] = default return default def has_key(self, name): """Determine whether a message contains the named header.""" return name.lower() in self.dict def __contains__(self, name): """Determine whether a message contains the named header.""" return name.lower() in self.dict def __iter__(self): return iter(self.dict) def keys(self): """Get all of a message's header field names.""" return self.dict.keys() def values(self): """Get all of a message's header field values.""" return self.dict.values() def items(self): """Get all of a message's headers. Returns a list of name, value tuples. """ return self.dict.items() def __str__(self): return ''.join(self.headers) # Utility functions # ----------------- # XXX Should fix unquote() and quote() to be really conformant. # XXX The inverses of the parse functions may also be useful. def unquote(s): """Remove quotes from a string.""" if len(s) > 1: if s.startswith('"') and s.endswith('"'): return s[1:-1].replace('\\\\', '\\').replace('\\"', '"') if s.startswith('<') and s.endswith('>'): return s[1:-1] return s def quote(s): """Add quotes around a string.""" return s.replace('\\', '\\\\').replace('"', '\\"') def parseaddr(address): """Parse an address into a (realname, mailaddr) tuple.""" a = AddressList(address) lst = a.addresslist if not lst: return (None, None) return lst[0] class AddrlistClass: """Address parser class by Ben Escoto. 
To understand what this class does, it helps to have a copy of RFC 2822 in front of you. http://www.faqs.org/rfcs/rfc2822.html Note: this class interface is deprecated and may be removed in the future. Use rfc822.AddressList instead. """ def __init__(self, field): """Initialize a new instance. `field' is an unparsed address header field, containing one or more addresses. """ self.specials = '()<>@,:;.\"[]' self.pos = 0 self.LWS = ' \t' self.CR = '\r\n' self.atomends = self.specials + self.LWS + self.CR # Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it # is obsolete syntax. RFC 2822 requires that we recognize obsolete # syntax, so allow dots in phrases. self.phraseends = self.atomends.replace('.', '') self.field = field self.commentlist = [] def gotonext(self): """Parse up to the start of the next address.""" while self.pos < len(self.field): if self.field[self.pos] in self.LWS + '\n\r': self.pos = self.pos + 1 elif self.field[self.pos] == '(': self.commentlist.append(self.getcomment()) else: break def getaddrlist(self): """Parse all addresses. Returns a list containing all of the addresses. """ result = [] ad = self.getaddress() while ad: result += ad ad = self.getaddress() return result def getaddress(self): """Parse the next address.""" self.commentlist = [] self.gotonext() oldpos = self.pos oldcl = self.commentlist plist = self.getphraselist() self.gotonext() returnlist = [] if self.pos >= len(self.field): # Bad email address technically, no domain. if plist: returnlist = [(' '.join(self.commentlist), plist[0])] elif self.field[self.pos] in '.@': # email address is just an addrspec # this isn't very efficient since we start over self.pos = oldpos self.commentlist = oldcl addrspec = self.getaddrspec() returnlist = [(' '.join(self.commentlist), addrspec)] elif self.field[self.pos] == ':': # address is a group returnlist = [] fieldlen = len(self.field) self.pos += 1 while self.pos < len(self.field): self.gotonext() if self.pos < fieldlen and self.field[self.pos] == ';': self.pos += 1 break returnlist = returnlist + self.getaddress() elif self.field[self.pos] == '<': # Address is a phrase then a route addr routeaddr = self.getrouteaddr() if self.commentlist: returnlist = [(' '.join(plist) + ' (' + \ ' '.join(self.commentlist) + ')', routeaddr)] else: returnlist = [(' '.join(plist), routeaddr)] else: if plist: returnlist = [(' '.join(self.commentlist), plist[0])] elif self.field[self.pos] in self.specials: self.pos += 1 self.gotonext() if self.pos < len(self.field) and self.field[self.pos] == ',': self.pos += 1 return returnlist def getrouteaddr(self): """Parse a route address (Return-path value). This method just skips all the route stuff and returns the addrspec. 
""" if self.field[self.pos] != '<': return expectroute = 0 self.pos += 1 self.gotonext() adlist = "" while self.pos < len(self.field): if expectroute: self.getdomain() expectroute = 0 elif self.field[self.pos] == '>': self.pos += 1 break elif self.field[self.pos] == '@': self.pos += 1 expectroute = 1 elif self.field[self.pos] == ':': self.pos += 1 else: adlist = self.getaddrspec() self.pos += 1 break self.gotonext() return adlist def getaddrspec(self): """Parse an RFC 2822 addr-spec.""" aslist = [] self.gotonext() while self.pos < len(self.field): if self.field[self.pos] == '.': aslist.append('.') self.pos += 1 elif self.field[self.pos] == '"': aslist.append('"%s"' % self.getquote()) elif self.field[self.pos] in self.atomends: break else: aslist.append(self.getatom()) self.gotonext() if self.pos >= len(self.field) or self.field[self.pos] != '@': return ''.join(aslist) aslist.append('@') self.pos += 1 self.gotonext() return ''.join(aslist) + self.getdomain() def getdomain(self): """Get the complete domain name from an address.""" sdlist = [] while self.pos < len(self.field): if self.field[self.pos] in self.LWS: self.pos += 1 elif self.field[self.pos] == '(': self.commentlist.append(self.getcomment()) elif self.field[self.pos] == '[': sdlist.append(self.getdomainliteral()) elif self.field[self.pos] == '.': self.pos += 1 sdlist.append('.') elif self.field[self.pos] in self.atomends: break else: sdlist.append(self.getatom()) return ''.join(sdlist) def getdelimited(self, beginchar, endchars, allowcomments = 1): """Parse a header fragment delimited by special characters. `beginchar' is the start character for the fragment. If self is not looking at an instance of `beginchar' then getdelimited returns the empty string. `endchars' is a sequence of allowable end-delimiting characters. Parsing stops when one of these is encountered. If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed within the parsed fragment. """ if self.field[self.pos] != beginchar: return '' slist = [''] quote = 0 self.pos += 1 while self.pos < len(self.field): if quote == 1: slist.append(self.field[self.pos]) quote = 0 elif self.field[self.pos] in endchars: self.pos += 1 break elif allowcomments and self.field[self.pos] == '(': slist.append(self.getcomment()) continue # have already advanced pos from getcomment elif self.field[self.pos] == '\\': quote = 1 else: slist.append(self.field[self.pos]) self.pos += 1 return ''.join(slist) def getquote(self): """Get a quote-delimited fragment from self's field.""" return self.getdelimited('"', '"\r', 0) def getcomment(self): """Get a parenthesis-delimited fragment from self's field.""" return self.getdelimited('(', ')\r', 1) def getdomainliteral(self): """Parse an RFC 2822 domain-literal.""" return '[%s]' % self.getdelimited('[', ']\r', 0) def getatom(self, atomends=None): """Parse an RFC 2822 atom. Optional atomends specifies a different set of end token delimiters (the default is to use self.atomends). This is used e.g. in getphraselist() since phrase endings must not include the `.' (which is legal in phrases).""" atomlist = [''] if atomends is None: atomends = self.atomends while self.pos < len(self.field): if self.field[self.pos] in atomends: break else: atomlist.append(self.field[self.pos]) self.pos += 1 return ''.join(atomlist) def getphraselist(self): """Parse a sequence of RFC 2822 phrases. A phrase is a sequence of words, which are in turn either RFC 2822 atoms or quoted-strings. 
Phrases are canonicalized by squeezing all runs of continuous whitespace into one space. """ plist = [] while self.pos < len(self.field): if self.field[self.pos] in self.LWS: self.pos += 1 elif self.field[self.pos] == '"': plist.append(self.getquote()) elif self.field[self.pos] == '(': self.commentlist.append(self.getcomment()) elif self.field[self.pos] in self.phraseends: break else: plist.append(self.getatom(self.phraseends)) return plist class AddressList(AddrlistClass): """An AddressList encapsulates a list of parsed RFC 2822 addresses.""" def __init__(self, field): AddrlistClass.__init__(self, field) if field: self.addresslist = self.getaddrlist() else: self.addresslist = [] def __len__(self): return len(self.addresslist) def __str__(self): return ", ".join(map(dump_address_pair, self.addresslist)) def __add__(self, other): # Set union newaddr = AddressList(None) newaddr.addresslist = self.addresslist[:] for x in other.addresslist: if not x in self.addresslist: newaddr.addresslist.append(x) return newaddr def __iadd__(self, other): # Set union, in-place for x in other.addresslist: if not x in self.addresslist: self.addresslist.append(x) return self def __sub__(self, other): # Set difference newaddr = AddressList(None) for x in self.addresslist: if not x in other.addresslist: newaddr.addresslist.append(x) return newaddr def __isub__(self, other): # Set difference, in-place for x in other.addresslist: if x in self.addresslist: self.addresslist.remove(x) return self def __getitem__(self, index): # Make indexing, slices, and 'in' work return self.addresslist[index] def dump_address_pair(pair): """Dump a (name, address) pair in a canonicalized form.""" if pair[0]: return '"' + pair[0] + '" <' + pair[1] + '>' else: return pair[1] # Parse a date field _monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec', 'january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october', 'november', 'december'] _daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'] # The timezone table does not include the military time zones defined # in RFC822, other than Z. According to RFC1123, the description in # RFC822 gets the signs wrong, so we can't rely on any such time # zones. RFC1123 recommends that numeric timezone indicators be used # instead of timezone names. _timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0, 'AST': -400, 'ADT': -300, # Atlantic (used in Canada) 'EST': -500, 'EDT': -400, # Eastern 'CST': -600, 'CDT': -500, # Central 'MST': -700, 'MDT': -600, # Mountain 'PST': -800, 'PDT': -700 # Pacific } def parsedate_tz(data): """Convert a date string to a time tuple. Accounts for military timezones. """ if not data: return None data = data.split() if data[0][-1] in (',', '.') or data[0].lower() in _daynames: # There's a dayname here. Skip it del data[0] else: # no space after the "weekday,"? 
i = data[0].rfind(',') if i >= 0: data[0] = data[0][i+1:] if len(data) == 3: # RFC 850 date, deprecated stuff = data[0].split('-') if len(stuff) == 3: data = stuff + data[1:] if len(data) == 4: s = data[3] i = s.find('+') if i > 0: data[3:] = [s[:i], s[i+1:]] else: data.append('') # Dummy tz if len(data) < 5: return None data = data[:5] [dd, mm, yy, tm, tz] = data mm = mm.lower() if not mm in _monthnames: dd, mm = mm, dd.lower() if not mm in _monthnames: return None mm = _monthnames.index(mm)+1 if mm > 12: mm = mm - 12 if dd[-1] == ',': dd = dd[:-1] i = yy.find(':') if i > 0: yy, tm = tm, yy if yy[-1] == ',': yy = yy[:-1] if not yy[0].isdigit(): yy, tz = tz, yy if tm[-1] == ',': tm = tm[:-1] tm = tm.split(':') if len(tm) == 2: [thh, tmm] = tm tss = '0' elif len(tm) == 3: [thh, tmm, tss] = tm else: return None try: yy = int(yy) dd = int(dd) thh = int(thh) tmm = int(tmm) tss = int(tss) except ValueError: return None tzoffset = None tz = tz.upper() if tz in _timezones: tzoffset = _timezones[tz] else: try: tzoffset = int(tz) except ValueError: pass # Convert a timezone offset into seconds ; -0500 -> -18000 if tzoffset: if tzoffset < 0: tzsign = -1 tzoffset = -tzoffset else: tzsign = 1 tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60) return (yy, mm, dd, thh, tmm, tss, 0, 1, 0, tzoffset) def parsedate(data): """Convert a time string to a time tuple.""" t = parsedate_tz(data) if t is None: return t return t[:9] def mktime_tz(data): """Turn a 10-tuple as returned by parsedate_tz() into a UTC timestamp.""" if data[9] is None: # No zone info, so localtime is better assumption than GMT return time.mktime(data[:8] + (-1,)) else: t = time.mktime(data[:8] + (0,)) return t - data[9] - time.timezone def formatdate(timeval=None): """Returns time format preferred for Internet standards. Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123 According to RFC 1123, day and month names must always be in English. If not for that, this code could use strftime(). It can't because strftime() honors the locale and could generated non-English names. """ if timeval is None: timeval = time.time() timeval = time.gmtime(timeval) return "%s, %02d %s %04d %02d:%02d:%02d GMT" % ( ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")[timeval[6]], timeval[2], ("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")[timeval[1]-1], timeval[0], timeval[3], timeval[4], timeval[5]) # When used as script, run a small test program. # The first command line argument must be a filename containing one # message in RFC-822 format. if __name__ == '__main__': import sys, os file = os.path.join(os.environ['HOME'], 'Mail/inbox/1') if sys.argv[1:]: file = sys.argv[1] f = open(file, 'r') m = Message(f) print 'From:', m.getaddr('from') print 'To:', m.getaddrlist('to') print 'Subject:', m.getheader('subject') print 'Date:', m.getheader('date') date = m.getdate_tz('date') tz = date[-1] date = time.localtime(mktime_tz(date)) if date: print 'ParsedDate:', time.asctime(date), hhmmss = tz hhmm, ss = divmod(hhmmss, 60) hh, mm = divmod(hhmm, 60) print "%+03d%02d" % (hh, mm), if ss: print ".%02d" % ss, print else: print 'ParsedDate:', None m.rewindbody() n = 0 while f.readline(): n += 1 print 'Lines:', n print '-'*70 print 'len =', len(m) if 'Date' in m: print 'Date =', m['Date'] if 'X-Nonsense' in m: pass print 'keys =', m.keys() print 'values =', m.values() print 'items =', m.items()
bsd-2-clause
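A short usage sketch for the module above, following its own "Directions for use" but feeding an in-memory file instead of a mailbox path (Python 2 only, since rfc822 was removed in Python 3):

from StringIO import StringIO
import rfc822

fp = StringIO("From: Guido van Rossum <guido@cwi.nl>\n"
              "Subject: hello\n"
              "\n"
              "body text\n")
m = rfc822.Message(fp)
print(m.getaddr('from'))       # ('Guido van Rossum', 'guido@cwi.nl')
print(m.getheader('subject'))  # 'hello'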
voer-platform/vp.web
vpw/migrations/0012_auto__add_settings.py
1
8038
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding model 'Settings'
        db.create_table(u'vpw_settings', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
            ('value', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
            ('language', self.gf('django.db.models.fields.CharField')(default='vi', max_length=8)),
        ))
        db.send_create_signal(u'vpw', ['Settings'])

    def backwards(self, orm):
        # Deleting model 'Settings'
        db.delete_table(u'vpw_settings')

    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'vpw.author': {
            'Meta': {'object_name': 'Author'},
            'author_id': ('django.db.models.fields.IntegerField', [], {}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
        },
        u'vpw.featuredauthor': {
            'Meta': {'object_name': 'FeaturedAuthor'},
            'author_id': ('django.db.models.fields.IntegerField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'weight': ('django.db.models.fields.IntegerField', [], {'default': '1'})
        },
        u'vpw.material': {
            'Meta': {'object_name': 'Material'},
            'author': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
            'categories': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
            'coeditor': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'derived_from': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
            'editor': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'keywords': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'default': "'vi'", 'max_length': '2', 'blank': 'True'}),
            'license_id': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'}),
            'licensor': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
            'maintainer': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
            'material_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
            'material_type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'}),
            'text': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'translator': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
            'type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'version': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        u'vpw.materialfeature': {
            'Meta': {'object_name': 'MaterialFeature'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'material_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'weight': ('django.db.models.fields.IntegerField', [], {'default': '1'})
        },
        u'vpw.settings': {
            'Meta': {'object_name': 'Settings'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'default': "'vi'", 'max_length': '8'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'value': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
        }
    }

    complete_apps = ['vpw']
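# Usage sketch: a South schema migration like this one is applied or reverted
# through manage.py (commands assume a standard Django project with South
# installed; shown for illustration only):
#
#   python manage.py migrate vpw              # run forwards(), creating vpw_settings
#   python manage.py migrate vpw <previous>   # run backwards(), dropping the table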
agpl-3.0
alianmohammad/pd-gem5
src/arch/x86/isa/insts/simd128/floating_point/data_conversion/convert_floating_point_to_floating_point.py
90
4105
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder.  You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black

microcode = '''
def macroop CVTSS2SD_XMM_XMM {
    cvtf2f xmml, xmmlm, destSize=8, srcSize=4, ext=Scalar
};

def macroop CVTSS2SD_XMM_M {
    ldfp ufp1, seg, sib, disp, dataSize=8
    cvtf2f xmml, ufp1, destSize=8, srcSize=4, ext=Scalar
};

def macroop CVTSS2SD_XMM_P {
    rdip t7
    ldfp ufp1, seg, riprel, disp, dataSize=8
    cvtf2f xmml, ufp1, destSize=8, srcSize=4, ext=Scalar
};

def macroop CVTSD2SS_XMM_XMM {
    cvtf2f xmml, xmmlm, destSize=4, srcSize=8, ext=Scalar
};

def macroop CVTSD2SS_XMM_M {
    ldfp ufp1, seg, sib, disp, dataSize=8
    cvtf2f xmml, ufp1, destSize=4, srcSize=8, ext=Scalar
};

def macroop CVTSD2SS_XMM_P {
    rdip t7
    ldfp ufp1, seg, riprel, disp, dataSize=8
    cvtf2f xmml, ufp1, destSize=4, srcSize=8, ext=Scalar
};

def macroop CVTPS2PD_XMM_XMM {
    cvtf2f xmmh, xmmlm, destSize=8, srcSize=4, ext=2
    cvtf2f xmml, xmmlm, destSize=8, srcSize=4, ext=0
};

def macroop CVTPS2PD_XMM_M {
    ldfp ufp1, seg, sib, disp, dataSize=8
    cvtf2f xmmh, ufp1, destSize=8, srcSize=4, ext=2
    cvtf2f xmml, ufp1, destSize=8, srcSize=4, ext=0
};

def macroop CVTPS2PD_XMM_P {
    rdip t7
    ldfp ufp1, seg, riprel, disp, dataSize=8
    cvtf2f xmmh, ufp1, destSize=8, srcSize=4, ext=2
    cvtf2f xmml, ufp1, destSize=8, srcSize=4, ext=0
};

def macroop CVTPD2PS_XMM_XMM {
    cvtf2f xmml, xmmlm, destSize=4, srcSize=8, ext=0
    cvtf2f xmml, xmmhm, destSize=4, srcSize=8, ext=2
    lfpimm xmmh, 0
};

def macroop CVTPD2PS_XMM_M {
    ldfp ufp1, seg, sib, "DISPLACEMENT", dataSize=8
    ldfp ufp2, seg, sib, "DISPLACEMENT + 8", dataSize=8
    cvtf2f xmml, ufp1, destSize=4, srcSize=8, ext=0
    cvtf2f xmml, ufp2, destSize=4, srcSize=8, ext=2
    lfpimm xmmh, 0
};

def macroop CVTPD2PS_XMM_P {
    rdip t7
    ldfp ufp1, seg, riprel, "DISPLACEMENT", dataSize=8
    ldfp ufp2, seg, riprel, "DISPLACEMENT + 8", dataSize=8
    cvtf2f xmml, ufp1, destSize=4, srcSize=8, ext=0
    cvtf2f xmml, ufp2, destSize=4, srcSize=8, ext=2
    lfpimm xmmh, 0
};
'''
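# Note on the pattern above: each conversion comes in three addressing forms.
# The _XMM_XMM macroops convert a register source, the _XMM_M macroops first
# load the source through a SIB-addressed memory operand (ldfp ... sib, disp),
# and the _XMM_P macroops load it RIP-relative, which is why they fetch the
# instruction pointer with rdip before the load.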
bsd-3-clause
dset0x/invenio
invenio/base/utils.py
4
11822
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.

"""Implement various utils.

Utilities that could potentially exist in separate packages should be
placed in this file.
"""

import sys
import warnings
from collections import namedtuple
from six import StringIO

import logging
import shlex
import six

from flask import has_app_context, current_app
from functools import partial, wraps
from werkzeug.utils import import_string, find_modules


def import_module_from_packages(name, app=None, packages=None, silent=False):
    """Import modules from packages."""
    warnings.warn("Use of import_module_from_packages has been deprecated."
                  " Please use Flask-Registry instead.", DeprecationWarning)
    if app is None and has_app_context():
        app = current_app
    if app is None:
        raise Exception(
            'Working outside application context or provide app'
        )

    if packages is None:
        packages = app.config.get('PACKAGES', [])

    for package in packages:
        if package.endswith('.*'):
            for module in find_modules(package[:-2], include_packages=True):
                try:
                    yield import_string(module + '.' + name, silent)
                except ImportError:
                    pass
                except Exception:
                    app.logger.exception("could not import %s.%s",
                                         package, name)
            continue
        try:
            yield import_string(package + '.' + name, silent)
        except ImportError:
            pass
        except Exception:
            app.logger.exception("could not import %s.%s", package, name)

autodiscover_user_settings = partial(import_module_from_packages,
                                     'user_settings')
autodiscover_managers = partial(import_module_from_packages, 'manage')


def try_to_eval(string, context={}, **general_context):
    """Take care of evaluating the python expression.

    If an exception happens, it tries to import the needed module.

    @param string: String to evaluate
    @param context: Context needed, in some cases, to evaluate the string
    @return: The value of the expression inside string
    """
    if not string:
        return None

    res = None
    imports = []
    general_context.update(context)
    simple = False
    while True:
        try:
            # kwalitee: disable=eval
            res = eval(string, globals().update(general_context), locals())
        except NameError as err:
            # Try first to import using werkzeug import_string
            try:
                from werkzeug.utils import import_string
                if "." in string:
                    part = string.split('.')[0]
                    import_string(part)
                    for i in string.split('.')[1:]:
                        part += '.' + i
                        import_string(part)
                    continue
                else:
                    simple = True
            except Exception:
                pass

            import_name = str(err).split("'")[1]
            if import_name not in imports:
                if import_name in context:
                    globals()[import_name] = context[import_name]
                else:
                    globals()[import_name] = __import__(import_name)
                imports.append(import_name)
                continue
            elif simple:
                import_name = str(err).split("'")[0]
                if import_name in context:
                    globals()[import_name] = context[import_name]
                else:
                    globals()[import_name] = __import__(import_name)
                imports.append(import_name)
                continue
            raise ImportError("Can't import the needed module to evaluate %s"
                              % (string, ))

        import os
        if isinstance(res, type(os)):
            raise ImportError
        return res


# Python 2.6 implementation of logging.captureWarnings introduced in
# Python 2.7.  Copy/pasted from logging/__init__.py.  Can be removed as soon
# as dependency on Python 2.6 is removed.
class NullHandler(logging.Handler):

    """This handler does nothing.

    It's intended to be used to avoid the "No handlers could be found for
    logger XXX" one-off warning. This is important for library code, which
    may contain code to log events. If a user of the library does not
    configure logging, the one-off warning might be produced; to avoid this,
    the library developer simply needs to instantiate a NullHandler and add
    it to the top-level logger of the library module or package.
    """

    def handle(self, record):
        """Handle."""
        pass

    def emit(self, record):
        """Emit."""
        pass

    def createLock(self):
        """Lock."""
        self.lock = None


_warnings_showwarning = None


def _showwarning(message, category, filename, lineno, file=None, line=None):
    """Implementation of showwarnings which redirects to logging.

    It will first check to see if the file parameter is None. If a file is
    specified, it will delegate to the original warnings implementation of
    showwarning. Otherwise, it will call warnings.formatwarning and will log
    the resulting string to a warnings logger named "py.warnings" with level
    logging.WARNING.
    """
    if sys.hexversion >= 0x2070000:
        raise RuntimeError("_showwarning() should not be used on Python 2.7+")
    if file is not None:
        if _warnings_showwarning is not None:
            _warnings_showwarning(message, category, filename, lineno, file,
                                  line)
    else:
        s = warnings.formatwarning(message, category, filename, lineno, line)
        logger = logging.getLogger("py.warnings")
        if not logger.handlers:
            logger.addHandler(NullHandler())
        logger.warning("%s", s)


def _captureWarnings(capture):
    """If capture is true, redirect all warnings to the logging package.

    If capture is False, ensure that warnings are not redirected to logging
    but to their original destinations.
    """
    if sys.hexversion >= 0x2070000:
        raise RuntimeError(
            "_captureWarnings() should not be used on Python 2.7+"
        )
    global _warnings_showwarning
    if capture:
        if _warnings_showwarning is None:
            _warnings_showwarning = warnings.showwarning
            warnings.showwarning = _showwarning
    else:
        if _warnings_showwarning is not None:
            warnings.showwarning = _warnings_showwarning
            _warnings_showwarning = None

if sys.hexversion >= 0x2070000:
    captureWarnings = logging.captureWarnings
else:
    captureWarnings = _captureWarnings


# https://mail.python.org/pipermail/python-ideas/2011-January/008958.html
class staticproperty(object):

    """Property decorator for static methods."""

    def __init__(self, function):
        self._function = function

    def __get__(self, instance, owner):
        return self._function()


class classproperty(object):

    """Property decorator for class methods."""

    def __init__(self, function):
        self._function = function

    def __get__(self, instance, owner):
        return self._function(owner)


def run_py_func(manager_run, command_line, passthrough=False):
    """Run a python function with a given sys.argv.

    Typically used to run the `main` function of an executable that
    provides no pythonic API.

    :param command_line: arguments to inject to sys.argv
    :type command_line: str (parsed with shlex) or iterable (passed verbatim)
    :param manager_run: function to run
    :type manager_run: function
    :param passthrough: allow stdout and stderr to be printed to the terminal
    :type passthrough: bool
    :return: namedtuple(out, err, exit_code)
    """
    sys_stderr_orig = sys.stderr
    sys_stdout_orig = sys.stdout
    sys.stdout = StringIO()
    sys.stderr = StringIO()
    sys_argv_orig = sys.argv

    formatter = logging.Formatter('%(message)s', '')

    # Log to StringIO
    log_to_new_stdout = logging.getLogger('run_py_func_new_stdout')
    log_handler_stdout = logging.StreamHandler(sys.stdout)
    log_handler_stdout.setFormatter(formatter)
    log_to_new_stdout.addHandler(log_handler_stdout)

    log_to_new_stderr = logging.getLogger('run_py_func_new_stderr')
    long_handler_new_stderr = logging.StreamHandler(sys.stderr)
    long_handler_new_stderr.setFormatter(formatter)
    log_to_new_stderr.addHandler(long_handler_new_stderr)

    # Also log to original stdout and stderr
    if passthrough:
        log_to_stderr_orig = logging.getLogger('run_py_func_stderr_orig')
        log_handler_stdout_orig = logging.StreamHandler(sys_stdout_orig)
        log_handler_stdout_orig.setFormatter(formatter)
        log_to_stderr_orig.addHandler(log_handler_stdout_orig)

        log_to_stderr_orig = logging.getLogger('run_py_func_stderr_orig')
        log_handler_stderr_orig = logging.StreamHandler(sys_stderr_orig)
        log_handler_stderr_orig.setFormatter(formatter)
        log_to_stderr_orig.addHandler(log_handler_stderr_orig)

    # Figure out how to handle `command_line`
    if isinstance(command_line, six.string_types):
        if sys.version_info < (2, 7, 3):
            # Work around non-unicode-capable versions of shlex.split
            sys.argv = map(lambda s: s.decode('utf8'),
                           shlex.split(command_line.encode('utf8')))
        else:
            sys.argv = shlex.split(command_line)
    else:
        sys.argv = command_line

    exit_code = None
    try:
        manager_run()
    except SystemExit as e:
        exit_code = e.code
    finally:
        out = sys.stdout.getvalue()
        err = sys.stderr.getvalue()
        # clear the standard output buffer
        sys.stdout.truncate(0)
        assert len(sys.stdout.getvalue()) == 0
        sys.stderr.truncate(0)
        assert len(sys.stderr.getvalue()) == 0
        sys.stderr = sys_stderr_orig
        sys.stdout = sys_stdout_orig
        sys.argv = sys_argv_orig

    return namedtuple('Res', ('out', 'err', 'exit_code'))(out, err, exit_code)


def toposort_depends(*dependencies):
    """Set topological dependencies via decorator."""
    def decorator(wrapped):
        wrapped.__toposort_dependencies = set(dependencies)
        return wrapped
    return decorator


def toposort_extract(wrapped):
    """Extract topological dependencies."""
    return getattr(wrapped, '__toposort_dependencies', set())


def toposort_send(signal, sender, **kwargs):
    """Send signal in topological order to all connected receivers."""
    from toposort import toposort_flatten
    if not signal.receivers:
        return []
    else:
        return [(receiver, receiver(sender, **kwargs))
                for receiver in toposort_flatten({
                    receiver: toposort_extract(receiver)
                    for receiver in signal.receivers_for(sender)
                })]
gpl-2.0
gibiansky/tensorflow
tensorflow/contrib/learn/python/learn/utils/inspect_checkpoint.py
19
2317
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple script for inspecting checkpoint files."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import sys

import tensorflow as tf

FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_string("file_name", "", "Checkpoint filename")
tf.app.flags.DEFINE_string("tensor_name", "", "Name of the tensor to inspect")


def print_tensors_in_checkpoint_file(file_name, tensor_name):
  """Prints tensors in a checkpoint file.

  If no `tensor_name` is provided, prints the tensor names and shapes
  in the checkpoint file.

  If `tensor_name` is provided, prints the content of the tensor.

  Args:
    file_name: Name of the checkpoint file.
    tensor_name: Name of the tensor in the checkpoint file to print.
  """
  try:
    if not tensor_name:
      variables = tf.contrib.framework.list_variables(file_name)
      for name, shape in variables:
        print("%s\t%s" % (name, str(shape)))
    else:
      print("tensor_name: ", tensor_name)
      print(tf.contrib.framework.load_variable(file_name, tensor_name))
  except Exception as e:  # pylint: disable=broad-except
    print(str(e))
    if "corrupted compressed block contents" in str(e):
      print("It's likely that your checkpoint file has been compressed "
            "with SNAPPY.")


def main(unused_argv):
  if not FLAGS.file_name:
    print("Usage: inspect_checkpoint --file_name=<checkpoint_file_name "
          "or directory> [--tensor_name=tensor_to_print]")
    sys.exit(1)
  else:
    print_tensors_in_checkpoint_file(FLAGS.file_name, FLAGS.tensor_name)


if __name__ == "__main__":
  tf.app.run()
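# Example invocations (the checkpoint path is hypothetical):
#
#   python inspect_checkpoint.py --file_name=/tmp/model.ckpt
#   python inspect_checkpoint.py --file_name=/tmp/model.ckpt --tensor_name=w1
#
# The first form lists all tensor names and shapes in the checkpoint; the
# second prints the contents of the named tensor.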
apache-2.0
jch1/models
pcl_rl/controller.py
7
16079
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Controller coordinates sampling and training model."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import numpy as np
import pickle
import random

flags = tf.flags
gfile = tf.gfile
FLAGS = flags.FLAGS


def find_best_eps_lambda(rewards, lengths):
  """Find the best lambda given a desired epsilon = FLAGS.max_divergence."""
  # perhaps not the best way to do this
  desired_div = FLAGS.max_divergence * np.mean(lengths)

  def calc_divergence(eps_lambda):
    max_reward = np.max(rewards)
    logz = (max_reward / eps_lambda +
            np.log(np.mean(np.exp((rewards - max_reward) / eps_lambda))))
    exprr = np.mean(np.exp(rewards / eps_lambda - logz) *
                    rewards / eps_lambda)
    return exprr - logz

  left = 0.0
  right = 1000.0

  if len(rewards) <= 8:
    return (left + right) / 2

  num_iter = max(4, 1 + int(np.log((right - left) / 0.1) / np.log(2.0)))
  for _ in xrange(num_iter):
    mid = (left + right) / 2
    cur_div = calc_divergence(mid)
    if cur_div > desired_div:
      left = mid
    else:
      right = mid

  return (left + right) / 2


class Controller(object):

  def __init__(self, env, env_spec, internal_dim,
               use_online_batch=True,
               batch_by_steps=False,
               unify_episodes=False,
               replay_batch_size=None,
               max_step=None,
               cutoff_agent=1,
               save_trajectories_file=None,
               use_trust_region=False,
               use_value_opt=False,
               update_eps_lambda=False,
               prioritize_by='rewards',
               get_model=None,
               get_replay_buffer=None,
               get_buffer_seeds=None):
    self.env = env
    self.env_spec = env_spec
    self.internal_dim = internal_dim
    self.use_online_batch = use_online_batch
    self.batch_by_steps = batch_by_steps
    self.unify_episodes = unify_episodes
    self.replay_batch_size = replay_batch_size
    self.max_step = max_step
    self.cutoff_agent = cutoff_agent
    self.save_trajectories_file = save_trajectories_file
    self.use_trust_region = use_trust_region
    self.use_value_opt = use_value_opt
    self.update_eps_lambda = update_eps_lambda
    self.prioritize_by = prioritize_by

    self.model = get_model()
    self.replay_buffer = get_replay_buffer()
    self.seed_replay_buffer(get_buffer_seeds())

    self.internal_state = np.array([self.initial_internal_state()] *
                                   len(self.env))
    self.last_obs = self.env_spec.initial_obs(len(self.env))
    self.last_act = self.env_spec.initial_act(len(self.env))
    self.last_pad = np.zeros(len(self.env))

    self.start_episode = np.array([True] * len(self.env))
    self.step_count = np.array([0] * len(self.env))
    self.episode_running_rewards = np.zeros(len(self.env))
    self.episode_running_lengths = np.zeros(len(self.env))

    self.episode_rewards = []
    self.episode_lengths = []
    self.total_rewards = []
    self.best_batch_rewards = None

  def setup(self):
    self.model.setup()

  def initial_internal_state(self):
    return np.zeros(self.model.policy.rnn_state_dim)

  def _sample_episodes(self, sess, greedy=False):
    """Sample episodes from environment using model."""
    # reset environments as necessary
    obs_after_reset = self.env.reset_if(self.start_episode)

    for i, obs in enumerate(obs_after_reset):
      if obs is not None:
        self.step_count[i] = 0
        self.internal_state[i] = self.initial_internal_state()
        for j in xrange(len(self.env_spec.obs_dims)):
          self.last_obs[j][i] = obs[j]
        for j in xrange(len(self.env_spec.act_dims)):
          self.last_act[j][i] = -1
        self.last_pad[i] = 0

    # maintain episode as a single unit if the last sampling
    # batch ended before the episode was terminated
    if self.unify_episodes:
      assert len(obs_after_reset) == 1
      new_ep = obs_after_reset[0] is not None
    else:
      new_ep = True

    self.start_id = 0 if new_ep else len(self.all_obs[:])

    initial_state = self.internal_state
    all_obs = [] if new_ep else self.all_obs[:]
    all_act = ([self.last_act] if new_ep else self.all_act[:])
    all_pad = [] if new_ep else self.all_pad[:]
    rewards = [] if new_ep else self.rewards[:]

    # start stepping in the environments
    step = 0
    while not self.env.all_done():
      self.step_count += 1 - np.array(self.env.dones)

      next_internal_state, sampled_actions = self.model.sample_step(
          sess, self.last_obs, self.internal_state, self.last_act,
          greedy=greedy)

      env_actions = self.env_spec.convert_actions_to_env(sampled_actions)
      next_obs, reward, next_dones, _ = self.env.step(env_actions)

      all_obs.append(self.last_obs)
      all_act.append(sampled_actions)
      all_pad.append(self.last_pad)
      rewards.append(reward)

      self.internal_state = next_internal_state
      self.last_obs = next_obs
      self.last_act = sampled_actions
      self.last_pad = np.array(next_dones).astype('float32')

      step += 1
      if self.max_step and step >= self.max_step:
        break

    self.all_obs = all_obs[:]
    self.all_act = all_act[:]
    self.all_pad = all_pad[:]
    self.rewards = rewards[:]

    # append final observation
    all_obs.append(self.last_obs)

    return initial_state, all_obs, all_act, rewards, all_pad

  def sample_episodes(self, sess):
    """Sample steps from the environment until we have enough for a batch."""

    # check if last batch ended with episode that was not terminated
    if self.unify_episodes:
      self.all_new_ep = self.start_episode[0]

    # sample episodes until we either have enough episodes or enough steps
    episodes = []
    total_steps = 0
    while total_steps < self.max_step * len(self.env):
      (initial_state,
       observations, actions, rewards, pads) = self._sample_episodes(sess)

      observations = zip(*observations)
      actions = zip(*actions)

      terminated = np.array(self.env.dones)

      self.total_rewards = np.sum(np.array(rewards[self.start_id:]) *
                                  (1 - np.array(pads[self.start_id:])),
                                  axis=0)
      self.episode_running_rewards *= 1 - self.start_episode
      self.episode_running_lengths *= 1 - self.start_episode
      self.episode_running_rewards += self.total_rewards
      self.episode_running_lengths += np.sum(
          1 - np.array(pads[self.start_id:]), axis=0)

      episodes.extend(self.convert_from_batched_episodes(
          initial_state, observations, actions, rewards,
          terminated, pads))
      total_steps += np.sum(1 - np.array(pads))

      # set next starting episodes
      self.start_episode = np.logical_or(
          terminated, self.step_count >= self.cutoff_agent)
      episode_rewards = self.episode_running_rewards[
          self.start_episode].tolist()
      self.episode_rewards.extend(episode_rewards)
      self.episode_lengths.extend(
          self.episode_running_lengths[self.start_episode].tolist())
      self.episode_rewards = self.episode_rewards[-100:]
      self.episode_lengths = self.episode_lengths[-100:]

      if (self.save_trajectories_file is not None and
          (self.best_batch_rewards is None or
           np.mean(self.total_rewards) > self.best_batch_rewards)):
        self.best_batch_rewards = np.mean(self.total_rewards)
        my_episodes = self.convert_from_batched_episodes(
            initial_state, observations, actions, rewards,
            terminated, pads)
        with gfile.GFile(self.save_trajectories_file, 'w') as f:
          pickle.dump(my_episodes, f)

      if not self.batch_by_steps:
        return (initial_state,
                observations, actions, rewards,
                terminated, pads)

    return self.convert_to_batched_episodes(episodes)

  def _train(self, sess,
             observations, initial_state, actions,
             rewards, terminated, pads):
    """Train model using batch."""
    if self.use_trust_region:
      # use trust region to optimize policy
      loss, _, summary = self.model.trust_region_step(
          sess,
          observations, initial_state, actions,
          rewards, terminated, pads,
          avg_episode_reward=np.mean(self.episode_rewards))
    else:
      # otherwise use simple gradient descent on policy
      loss, _, summary = self.model.train_step(
          sess,
          observations, initial_state, actions,
          rewards, terminated, pads,
          avg_episode_reward=np.mean(self.episode_rewards))

    if self.use_value_opt:
      # optionally perform specific value optimization
      self.model.fit_values(
          sess,
          observations, initial_state, actions,
          rewards, terminated, pads)

    return loss, summary

  def train(self, sess):
    """Sample some episodes and train on some episodes."""
    cur_step = sess.run(self.model.inc_global_step)
    self.cur_step = cur_step

    # on the first iteration, set target network close to online network
    if self.cur_step == 0:
      for _ in xrange(100):
        sess.run(self.model.copy_op)
    # on other iterations, just perform single target <-- online operation
    sess.run(self.model.copy_op)

    # sample from env
    (initial_state,
     observations, actions, rewards,
     terminated, pads) = self.sample_episodes(sess)

    # add to replay buffer
    self.add_to_replay_buffer(
        initial_state, observations, actions,
        rewards, terminated, pads)

    loss, summary = 0, None

    # train on online batch
    if self.use_online_batch:
      loss, summary = self._train(
          sess,
          observations, initial_state, actions,
          rewards, terminated, pads)

    # update relative entropy coefficient
    if self.update_eps_lambda:
      episode_rewards = np.array(self.episode_rewards)
      episode_lengths = np.array(self.episode_lengths)
      eps_lambda = find_best_eps_lambda(episode_rewards, episode_lengths)
      sess.run(self.model.objective.assign_eps_lambda,
               feed_dict={self.model.objective.new_eps_lambda: eps_lambda})

    # train on replay batch
    replay_batch, replay_probs = self.get_from_replay_buffer(
        self.replay_batch_size)

    if replay_batch:
      (initial_state,
       observations, actions, rewards,
       terminated, pads) = replay_batch

      loss, summary = self._train(
          sess,
          observations, initial_state, actions,
          rewards, terminated, pads)

    return loss, summary, self.total_rewards, self.episode_rewards

  def eval(self, sess):
    """Use greedy sampling."""
    (initial_state,
     observations, actions, rewards, pads) = self._sample_episodes(
         sess, greedy=True)

    total_rewards = np.sum(np.array(rewards) * (1 - np.array(pads)), axis=0)
    return np.mean(total_rewards)

  def convert_from_batched_episodes(
      self, initial_state, observations, actions, rewards,
      terminated, pads):
    """Convert time-major batch of episodes to batch-major list of episodes."""
    rewards = np.array(rewards)
    pads = np.array(pads)
    observations = [np.array(obs) for obs in observations]
    actions = [np.array(act) for act in actions]

    total_rewards = np.sum(rewards * (1 - pads), axis=0)
    total_length = np.sum(1 - pads, axis=0).astype('int32')

    episodes = []
    num_episodes = rewards.shape[1]
    for i in xrange(num_episodes):
      length = total_length[i]
      ep_initial = initial_state[i]
      ep_obs = [obs[:length, i, ...] for obs in observations]
      ep_act = [act[:length + 1, i, ...] for act in actions]
      ep_rewards = rewards[:length, i]

      episodes.append(
          [ep_initial, ep_obs, ep_act, ep_rewards, terminated[i]])

    return episodes

  def convert_to_batched_episodes(self, episodes, max_length=None):
    """Convert batch-major list of episodes to time-major batch of episodes."""
    lengths = [len(ep[-2]) for ep in episodes]
    max_length = max_length or max(lengths)

    new_episodes = []
    for ep, length in zip(episodes, lengths):
      initial, observations, actions, rewards, terminated = ep
      observations = [np.resize(obs, [max_length + 1] + list(obs.shape)[1:])
                      for obs in observations]
      actions = [np.resize(act, [max_length + 1] + list(act.shape)[1:])
                 for act in actions]
      pads = np.array([0] * length + [1] * (max_length - length))
      rewards = np.resize(rewards, [max_length]) * (1 - pads)
      new_episodes.append([initial, observations, actions, rewards,
                           terminated, pads])

    (initial, observations, actions, rewards,
     terminated, pads) = zip(*new_episodes)

    observations = [np.swapaxes(obs, 0, 1)
                    for obs in zip(*observations)]
    actions = [np.swapaxes(act, 0, 1)
               for act in zip(*actions)]
    rewards = np.transpose(rewards)
    pads = np.transpose(pads)

    return (initial, observations, actions, rewards, terminated, pads)

  def add_to_replay_buffer(self, initial_state, observations, actions,
                           rewards, terminated, pads):
    """Add batch of episodes to replay buffer."""
    if self.replay_buffer is None:
      return

    rewards = np.array(rewards)
    pads = np.array(pads)
    total_rewards = np.sum(rewards * (1 - pads), axis=0)

    episodes = self.convert_from_batched_episodes(
        initial_state, observations, actions, rewards,
        terminated, pads)

    priorities = (total_rewards if self.prioritize_by == 'reward'
                  else self.cur_step)

    if not self.unify_episodes or self.all_new_ep:
      self.last_idxs = self.replay_buffer.add(
          episodes, priorities)
    else:
      # If we are unifying episodes, we attempt to keep them unified
      # in the replay buffer.  The first episode sampled in the current
      # batch is a continuation of the last episode from the previous batch
      self.replay_buffer.add(episodes[:1], priorities, self.last_idxs[-1:])
      if len(episodes) > 1:
        self.replay_buffer.add(episodes[1:], priorities)

  def get_from_replay_buffer(self, batch_size):
    """Sample a batch of episodes from the replay buffer."""
    if self.replay_buffer is None or len(self.replay_buffer) < 1 * batch_size:
      return None, None

    desired_count = batch_size * self.max_step
    # in the case of batch_by_steps, we sample larger and larger
    # amounts from the replay buffer until we have enough steps.
    while True:
      if batch_size > len(self.replay_buffer):
        batch_size = len(self.replay_buffer)
      episodes, probs = self.replay_buffer.get_batch(batch_size)
      count = sum(len(ep[-2]) for ep in episodes)
      if count >= desired_count or not self.batch_by_steps:
        break
      if batch_size == len(self.replay_buffer):
        return None, None
      batch_size *= 1.2

    return (self.convert_to_batched_episodes(episodes), probs)

  def seed_replay_buffer(self, episodes):
    """Seed the replay buffer with some episodes."""
    if self.replay_buffer is None:
      return

    # just need to add initial state
    for i in xrange(len(episodes)):
      episodes[i] = [self.initial_internal_state()] + episodes[i]

    self.replay_buffer.seed_buffer(episodes)
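# Illustrative call to find_best_eps_lambda (the reward and length arrays are
# made-up values; FLAGS.max_divergence must be set by the surrounding binary):
#
#   rewards = np.array([1.0, 5.0, 3.0, 4.0, 2.0, 6.0, 0.5, 2.5, 3.5])
#   lengths = np.array([10, 12, 9, 11, 10, 13, 8, 10, 11])
#   eps_lambda = find_best_eps_lambda(rewards, lengths)
#
# The bisection narrows the interval [0, 1000] toward a lambda whose computed
# divergence is close to FLAGS.max_divergence * mean(lengths); with 8 or fewer
# rewards it simply returns the midpoint of the interval.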
apache-2.0
ZeitOnline/zeit.retresco
setup.py
1
1668
from setuptools import setup, find_packages
import os.path


def project_path(*names):
    return os.path.join(os.path.dirname(__file__), *names)


setup(
    name='zeit.retresco',
    version='1.32.3.dev0',
    author='gocept, Zeit Online',
    author_email='zon-backend@zeit.de',
    url='http://www.zeit.de/',
    description="vivi interface to retresco keyword and topic management",
    long_description='\n\n'.join(open(project_path(name)).read() for name in (
        'README.rst',
        'CHANGES.txt',
    )),
    packages=find_packages('src'),
    package_dir={'': 'src'},
    include_package_data=True,
    zip_safe=False,
    license='BSD',
    namespace_packages=['zeit'],
    install_requires=[
        'elasticsearch >=2.0.0, <3.0.0',
        'gocept.runner',
        'grokcore.component',
        'lxml',
        'mock',
        'plone.testing',
        'requests',
        'setuptools',
        'zeit.cms >= 3.28.0.dev0',
        'zeit.content.author',
        'zeit.content.article',
        'zeit.content.gallery',
        'zeit.content.image',
        'zeit.content.infobox',
        'zeit.content.link',
        'zeit.content.portraitbox',
        'zeit.content.rawxml',
        'zeit.content.text',
        'zeit.content.volume',
        'zeit.find >= 3.0.0',
        'zeit.push',
        'zeit.seo',
        'zope.component',
        'zope.dottedname',
        'zope.interface',
        'zope.publisher',
        'zope.testbrowser',
    ],
    entry_points={
        'console_scripts': [
            'update-topiclist=zeit.retresco.connection:update_topiclist',
            'tms-reindex-object=zeit.retresco.update:reindex',
        ]
    },
)
bsd-3-clause
mattclark/osf.io
api_tests/nodes/views/test_node_sparse_fieldsets.py
2
10117
import pytest

from api.base.settings.defaults import API_BASE
from osf_tests.factories import (
    ProjectFactory,
    AuthUserFactory,
    PrivateLinkFactory,
)
from osf.utils import permissions


@pytest.fixture()
def user():
    return AuthUserFactory()


@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
class TestNodeSparseFieldsList:

    @pytest.fixture()
    def deleted_project(self):
        return ProjectFactory(is_deleted=True)

    @pytest.fixture()
    def private_project(self, user):
        return ProjectFactory(is_public=False, creator=user)

    @pytest.fixture()
    def public_project(self, user):
        return ProjectFactory(is_public=True, creator=user)

    @pytest.fixture()
    def url(self):
        return '/{}nodes/?fields[nodes]='.format(API_BASE)

    def test_node_sparse_fields_list(
            self, app, user, deleted_project, private_project,
            public_project, url):

        # test_empty_fields_returns_no_attributes
        res = app.get(url)
        node_json = res.json['data'][0]

        assert node_json['attributes'] == {}
        assert set(node_json.keys()) == set(
            ['links', 'type', 'id', 'attributes'])

        # test_sparse_fields_includes_relationships
        res = app.get(url + 'children')
        node_json = res.json['data'][0]

        assert node_json['attributes'] == {}
        assert set(node_json.keys()) == set(
            ['links', 'type', 'id', 'attributes', 'relationships'])
        assert node_json['relationships']['children']['links']['related']['href'].endswith(
            '/{}nodes/{}/children/'.format(API_BASE, public_project._id))

        # test_returns_expected_nodes
        res = app.get(url + 'title')
        data = res.json['data']

        ids = [each['id'] for each in data]
        assert public_project._id in ids
        assert deleted_project._id not in ids
        assert private_project._id not in ids
        assert len(data) == 1

        node_json = data[0]
        assert node_json['attributes']['title'] == public_project.title
        assert len(node_json['attributes']) == 1
        assert set(node_json.keys()) == set(
            ['links', 'type', 'id', 'attributes'])

        # test_filtering_by_id
        url = '/{}nodes/?filter[id]={}&fields[nodes]='.format(
            API_BASE, public_project._id)
        res = app.get(url)
        assert [each['id'] for each in res.json['data']] == [
            public_project._id]

        node_json = res.json['data'][0]
        assert set(node_json.keys()) == set(
            ['links', 'type', 'id', 'attributes'])
        assert node_json['attributes'] == {}

        # test_filtering_by_excluded_field
        url = '/{}nodes/?filter[title]={}&fields[nodes]='.format(
            API_BASE, public_project.title)
        res = app.get(url)
        assert [each['id'] for each in res.json['data']] == [
            public_project._id]

        node_json = res.json['data'][0]
        assert set(node_json.keys()) == set(
            ['links', 'type', 'id', 'attributes'])
        assert node_json['attributes'] == {}

        # test_create_with_sparse_fields
        payload = {
            'data': {
                'type': 'nodes',
                'attributes': {
                    'title': 'New Title',
                    'description': 'What a test',
                    'category': 'project',
                    'public': True,
                }
            }
        }
        res = app.post_json_api(url, payload, auth=user.auth)
        assert res.status_code == 201
        assert set(res.json['data'].keys()) == set(
            ['links', 'type', 'id', 'attributes'])
        assert res.json['data']['attributes'] == {}


@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
class TestNodeSparseFieldsDetail:

    @pytest.fixture()
    def node(self, user):
        return ProjectFactory(is_public=True, creator=user)

    @pytest.fixture()
    def url(self, node):
        return '/{}nodes/{}/'.format(API_BASE, node._id)

    def test_node_sparse_fields_detail_non_mutating_tests(
            self, app, user, node, url):

        # test_empty_fields_returns_no_attributes
        res = app.get(url + '?fields[nodes]=')
        node_json = res.json['data']

        assert node_json['attributes'] == {}
        assert set(node_json.keys()) == set(
            ['links', 'type', 'id', 'attributes'])

        # test_embed_sparse_same_type
        child = ProjectFactory(parent=node, is_public=True, creator=user)
        res_url = '{}?embed=children&fields[nodes]=title,children'.format(url)
        res = app.get(res_url)
        node_json = res.json['data']

        assert set(node_json.keys()) == set(
            ['links', 'type', 'id', 'attributes', 'relationships', 'embeds'])
        assert node_json['attributes'].keys() == ['title']
        assert set(node_json['embeds']['children']['data'][0].keys()) == set(
            ['links', 'type', 'id', 'attributes', 'relationships'])
        assert node_json['embeds']['children']['data'][0]['attributes'].keys() == [
            'title']
        assert node_json['embeds']['children']['data'][0]['attributes']['title'] == child.title

        # test_embed_sparse_different_types
        res_url = '{}?embed=contributors&fields[nodes]=title,contributors'.format(
            url)
        res = app.get(res_url)
        node_json = res.json['data']

        assert set(node_json.keys()) == set(
            ['links', 'type', 'id', 'attributes', 'embeds', 'relationships'])
        assert node_json['attributes'].keys() == ['title']
        assert len(node_json['embeds']['contributors']['data']) == 1
        assert node_json['embeds']['contributors']['data'][0]['id'] == '{}-{}'.format(
            node._id, user._id)
        assert len(node_json['embeds']['contributors']['data'][0]['attributes']) > 1

        # test_sparse_embedded_type
        res_url = '{}?embed=contributors&fields[contributors]='.format(url)
        res = app.get(res_url)
        node_json = res.json['data']

        assert set(node_json.keys()) == set(
            ['links', 'type', 'id', 'attributes', 'embeds', 'relationships'])
        assert len(node_json['attributes'].keys()) > 1
        assert len(node_json['embeds']['contributors']['data']) == 1
        assert node_json['embeds']['contributors']['data'][0]['id'] == '{}-{}'.format(
            node._id, user._id)
        assert len(node_json['embeds']['contributors']['data'][0]['attributes']) == 0

        # test_multiple_sparse_types
        res_url = '{}?fields[nodes]=contributors,title&embed=contributors&fields[contributors]=bibliographic'.format(
            url)
        res = app.get(res_url)
        node_json = res.json['data']

        assert set(node_json.keys()) == set(
            ['links', 'type', 'id', 'attributes', 'embeds', 'relationships'])
        assert node_json['attributes'].keys() == ['title']
        assert len(node_json['embeds']['contributors']['data']) == 1
        assert node_json['embeds']['contributors']['data'][0]['id'] == '{}-{}'.format(
            node._id, user._id)
        assert node_json['embeds']['contributors']['data'][0]['attributes'].keys() == [
            'bibliographic']

    def test_update_with_sparse_fields(self, app, user, node, url):
        res_url = '{}?fields[nodes]='.format(url)
        old_title = node.title
        payload = {'data': {
            'id': node._id,
            'type': 'nodes',
            'attributes': {
                'title': 'new title'
            }
        }}
        res = app.patch_json_api(res_url, payload, auth=user.auth)
        assert res.status_code == 200
        assert res.json['data']['attributes'] == {}
        node.reload()
        assert node.title != old_title
        assert node.title == 'new title'


@pytest.mark.django_db
class TestSparseViewOnlyLinks:

    @pytest.fixture()
    def creation_user(self):
        return AuthUserFactory()

    @pytest.fixture()
    def viewing_user(self):
        return AuthUserFactory()

    @pytest.fixture()
    def contributing_read_user(self):
        return AuthUserFactory()

    @pytest.fixture()
    def contributing_write_user(self):
        return AuthUserFactory()

    @pytest.fixture()
    def private_node_one(
            self, creation_user, contributing_read_user,
            contributing_write_user):
        private_node_one = ProjectFactory(
            is_public=False, creator=creation_user, title='Private One')
        private_node_one.add_contributor(
            contributing_read_user, permissions=permissions.READ, save=True)
        private_node_one.add_contributor(
            contributing_write_user, permissions=permissions.WRITE, save=True)
        return private_node_one

    @pytest.fixture()
    def private_node_one_anonymous_link(self, private_node_one):
        private_node_one_anonymous_link = PrivateLinkFactory(anonymous=True)
        private_node_one_anonymous_link.nodes.add(private_node_one)
        private_node_one_anonymous_link.save()
        return private_node_one_anonymous_link

    @pytest.fixture()
    def private_node_one_url(self, private_node_one):
        return '/{}nodes/{}/'.format(API_BASE, private_node_one._id)

    def test_sparse_fields_with_anonymous_link(
            self, app, private_node_one_url, private_node_one_anonymous_link):
        res = app.get(private_node_one_url, {
            'view_only': private_node_one_anonymous_link.key,
            'fields[nodes]': 'title,current_user_can_comment,contributors',
            'fields[contributors]': 'id',
            'embed': 'contributors'
        })
        # current_user_can_comment and contributors are anonymized fields, should be removed
        assert res.status_code == 200
        assert res.json['data']['attributes'].keys() == ['title']
        embeds = res.json['data'].get('embeds', None)
        assert embeds is None or 'contributors' not in embeds
apache-2.0
hail-is/hail
hail/python/hail/ir/table_ir.py
1
25125
import json

import hail as hl
from hail.expr.types import dtype
from hail.ir.base_ir import BaseIR, TableIR
from hail.utils.java import Env
from hail.utils.misc import escape_str, parsable_strings, dump_json, escape_id


class MatrixRowsTable(TableIR):
    def __init__(self, child):
        super().__init__(child)
        self.child = child

    def _compute_type(self):
        self._type = hl.ttable(self.child.typ.global_type,
                               self.child.typ.row_type,
                               self.child.typ.row_key)


class TableJoin(TableIR):
    def __init__(self, left, right, join_type, join_key):
        super().__init__(left, right)
        self.left = left
        self.right = right
        self.join_type = join_type
        self.join_key = join_key

    def head_str(self):
        return f'{escape_id(self.join_type)} {self.join_key}'

    def _eq(self, other):
        return self.join_key == other.join_key and \
            self.join_type == other.join_type

    def _compute_type(self):
        left_typ = self.left.typ
        right_typ = self.right.typ
        self._type = hl.ttable(
            left_typ.global_type._concat(right_typ.global_type),
            left_typ.key_type._concat(left_typ.value_type)._concat(right_typ.value_type),
            left_typ.row_key + right_typ.row_key[self.join_key:])


class TableLeftJoinRightDistinct(TableIR):
    def __init__(self, left, right, root):
        super().__init__(left, right)
        self.left = left
        self.right = right
        self.root = root

    def head_str(self):
        return escape_id(self.root)

    def _eq(self, other):
        return self.root == other.root

    def _compute_type(self):
        left_typ = self.left.typ
        right_typ = self.right.typ
        self._type = hl.ttable(
            left_typ.global_type,
            left_typ.row_type._insert_field(self.root, right_typ.value_type),
            left_typ.row_key)


class TableIntervalJoin(TableIR):
    def __init__(self, left, right, root, product=False):
        super().__init__(left, right)
        self.left = left
        self.right = right
        self.root = root
        self.product = product

    def head_str(self):
        return f'{escape_id(self.root)} {self.product}'

    def _eq(self, other):
        return self.root == other.root and self.product == other.product

    def _compute_type(self):
        left_typ = self.left.typ
        right_typ = self.right.typ
        if self.product:
            right_val_typ = left_typ.row_type._insert_field(
                self.root, hl.tarray(right_typ.value_type))
        else:
            right_val_typ = left_typ.row_type._insert_field(
                self.root, right_typ.value_type)
        self._type = hl.ttable(
            left_typ.global_type,
            right_val_typ,
            left_typ.row_key)


class TableUnion(TableIR):
    def __init__(self, children):
        super().__init__(*children)
        self.children = children

    def _compute_type(self):
        for c in self.children:
            c.typ  # force
        self._type = self.children[0].typ


class TableRange(TableIR):
    def __init__(self, n, n_partitions):
        super().__init__()
        self.n = n
        self.n_partitions = n_partitions

    def head_str(self):
        return f'{self.n} {self.n_partitions}'

    def _eq(self, other):
        return self.n == other.n and self.n_partitions == other.n_partitions

    def _compute_type(self):
        self._type = hl.ttable(hl.tstruct(),
                               hl.tstruct(idx=hl.tint32),
                               ['idx'])


class TableMapGlobals(TableIR):
    def __init__(self, child, new_globals):
        super().__init__(child, new_globals)
        self.child = child
        self.new_globals = new_globals

    def _compute_type(self):
        self.new_globals._compute_type(self.child.typ.global_env(), None)
        self._type = hl.ttable(self.new_globals.typ,
                               self.child.typ.row_type,
                               self.child.typ.row_key)

    def renderable_bindings(self, i, default_value=None):
        return self.child.typ.global_env(default_value) if i == 1 else {}


class TableExplode(TableIR):
    def __init__(self, child, path):
        super().__init__(child)
        self.child = child
        self.path = path

    def head_str(self):
        return parsable_strings(self.path)

    def _eq(self, other):
        return self.path == other.path

    def _compute_type(self):
        atyp = self.child.typ.row_type._index_path(self.path)
        self._type = hl.ttable(self.child.typ.global_type,
                               self.child.typ.row_type._insert(self.path, atyp.element_type),
                               self.child.typ.row_key)


class TableKeyBy(TableIR):
    def __init__(self, child, keys, is_sorted=False):
        super().__init__(child)
        self.child = child
        self.keys = keys
        self.is_sorted = is_sorted

    def head_str(self):
        return '({}) {}'.format(
            ' '.join([escape_id(x) for x in self.keys]), self.is_sorted)

    def _eq(self, other):
        return self.keys == other.keys and self.is_sorted == other.is_sorted

    def _compute_type(self):
        self._type = hl.ttable(self.child.typ.global_type,
                               self.child.typ.row_type,
                               self.keys)


class TableMapRows(TableIR):
    def __init__(self, child, new_row):
        super().__init__(child, new_row)
        self.child = child
        self.new_row = new_row

    def _compute_type(self):
        # agg_env for scans
        self.new_row._compute_type(self.child.typ.row_env(),
                                   self.child.typ.row_env())
        self._type = hl.ttable(
            self.child.typ.global_type,
            self.new_row.typ,
            self.child.typ.row_key)

    def renderable_bindings(self, i, default_value=None):
        if i == 1:
            env = self.child.typ.row_env(default_value)
            env[BaseIR.agg_capability] = default_value
            return env
        else:
            return {}

    def renderable_scan_bindings(self, i, default_value=None):
        return self.child.typ.row_env(default_value) if i == 1 else {}


class TableMapPartitions(TableIR):
    def __init__(self, child, global_name, partition_stream_name, body):
        super().__init__(child, body)
        self.child = child
        self.body = body
        self.global_name = global_name
        self.partition_stream_name = partition_stream_name

    def _compute_type(self):
        self.body._compute_type(
            {self.global_name: self.child.typ.global_type,
             self.partition_stream_name: hl.tstream(self.child.typ.row_type)},
            {})
        assert isinstance(self.body.typ, hl.tstream) \
            and isinstance(self.body.typ.element_type, hl.tstruct)
        new_row_type = self.body.typ.element_type
        for k in self.child.typ.row_key:
            assert k in new_row_type
        self._type = hl.ttable(self.child.typ.global_type,
                               new_row_type,
                               self.child.typ.row_key)

    def renderable_bindings(self, i, default_value=None):
        if i == 1:
            return {self.global_name: self.child.typ.global_type
                        if default_value is None else default_value,
                    self.partition_stream_name: hl.tstream(self.child.typ.row_type)
                        if default_value is None else default_value}
        else:
            return {}

    def head_str(self):
        return f'{escape_id(self.global_name)} {escape_id(self.partition_stream_name)}'

    def _eq(self, other):
        return self.global_name == other.global_name \
            and self.partition_stream_name == other.partition_stream_name


class TableRead(TableIR):
    def __init__(self, reader, drop_rows=False):
        super().__init__()
        self.reader = reader
        self.drop_rows = drop_rows

    def head_str(self):
        return f'None {self.drop_rows} "{self.reader.render()}"'

    def _eq(self, other):
        return self.reader == other.reader and self.drop_rows == other.drop_rows

    def _compute_type(self):
        self._type = Env.backend().table_type(self)


class TableImport(TableIR):
    def __init__(self, paths, typ, reader_options):
        super().__init__()
        self.paths = paths
        self._typ = typ
        self.reader_options = reader_options

    def head_str(self):
        return '(({}) {} {}'.format(
            ' '.join([escape_str(path) for path in self.paths]),
            self._typ._parsable_string(),
            escape_str(json.dumps(self.reader_options)))

    def _eq(self, other):
        return self.paths == other.paths \
            and self.typ == other.typ \
            and self.reader_options == other.reader_options

    def _compute_type(self):
        self._type = Env.backend().table_type(self)


class MatrixEntriesTable(TableIR):
    def __init__(self, child):
        super().__init__(child)
        self.child = child

    def _compute_type(self):
        child_typ = self.child.typ
        self._type = hl.ttable(child_typ.global_type,
                               child_typ.row_type
                               ._concat(child_typ.col_type)
                               ._concat(child_typ.entry_type),
                               child_typ.row_key + child_typ.col_key)


class TableFilter(TableIR):
    def __init__(self, child, pred):
        super().__init__(child, pred)
        self.child = child
        self.pred = pred

    def _compute_type(self):
        self.pred._compute_type(self.child.typ.row_env(), None)
        self._type = self.child.typ

    def renderable_bindings(self, i, default_value=None):
        return self.child.typ.row_env(default_value) if i == 1 else {}


class TableKeyByAndAggregate(TableIR):
    def __init__(self, child, expr, new_key, n_partitions, buffer_size):
        super().__init__(child, expr, new_key)
        self.child = child
        self.expr = expr
        self.new_key = new_key
        self.n_partitions = n_partitions
        self.buffer_size = buffer_size

    def head_str(self):
        return f'{self.n_partitions} {self.buffer_size}'

    def _eq(self, other):
        return self.n_partitions == other.n_partitions \
            and self.buffer_size == other.buffer_size

    def _compute_type(self):
        self.expr._compute_type(self.child.typ.global_env(),
                                self.child.typ.row_env())
        self.new_key._compute_type(self.child.typ.row_env(), None)
        self._type = hl.ttable(self.child.typ.global_type,
                               self.new_key.typ._concat(self.expr.typ),
                               list(self.new_key.typ))

    def renderable_bindings(self, i, default_value=None):
        if i == 1:
            env = self.child.typ.global_env(default_value)
            env[BaseIR.agg_capability] = default_value
            return env
        elif i == 2:
            return self.child.typ.row_env(default_value)
        else:
            return {}

    def renderable_agg_bindings(self, i, default_value=None):
        return self.child.typ.row_env(default_value) if i == 1 else {}


class TableGroupWithinPartitions(TableIR):
    def __init__(self, child, name, n):
        super().__init__(child)
        self.child = child
        self.name = name
        self.n = n

    def head_str(self):
        return f'{escape_str(self.name)} {self.n}'

    def _compute_type(self):
        child_typ = self.child.typ
        self._type = hl.ttable(child_typ.global_type,
                               child_typ.key_type._insert_field(self.name, hl.tarray(child_typ.row_type)),
                               child_typ.row_key)


class TableAggregateByKey(TableIR):
    def __init__(self, child, expr):
        super().__init__(child, expr)
        self.child = child
        self.expr = expr

    def _compute_type(self):
        child_typ = self.child.typ
        self.expr._compute_type(child_typ.global_env(), child_typ.row_env())
        self._type = hl.ttable(child_typ.global_type,
                               child_typ.key_type._concat(self.expr.typ),
                               child_typ.row_key)

    def renderable_bindings(self, i, default_value=None):
        if i == 1:
            env = self.child.typ.row_env(default_value)
            env[BaseIR.agg_capability] = default_value
            return env
        else:
            return {}

    def renderable_agg_bindings(self, i, default_value=None):
        return self.child.typ.row_env(default_value) if i == 1 else {}


class MatrixColsTable(TableIR):
    def __init__(self, child):
        super().__init__(child)
        self.child = child

    def _compute_type(self):
        self._type = hl.ttable(self.child.typ.global_type,
                               self.child.typ.col_type,
                               self.child.typ.col_key)


class TableParallelize(TableIR):
    def __init__(self, rows_and_global, n_partitions):
        super().__init__(rows_and_global)
        self.rows_and_global = rows_and_global
        self.n_partitions = n_partitions

    def head_str(self):
        return self.n_partitions

    def _eq(self, other):
        return self.n_partitions == other.n_partitions

    def _compute_type(self):
        self.rows_and_global._compute_type({}, None)
        self._type = hl.ttable(self.rows_and_global.typ['global'],
                               self.rows_and_global.typ['rows'].element_type,
                               [])


class TableHead(TableIR):
    def __init__(self, child, n):
        super().__init__(child)
        self.child = child
        self.n = n

    def head_str(self):
        return self.n

    def _eq(self, other):
        return self.n == other.n

    def _compute_type(self):
        self._type = self.child.typ


class TableTail(TableIR):
    def __init__(self, child, n):
        super().__init__(child)
        self.child = child
        self.n = n

    def head_str(self):
        return self.n

    def _eq(self, other):
        return self.n == other.n

    def _compute_type(self):
        self._type = self.child.typ


class TableOrderBy(TableIR):
    def __init__(self, child, sort_fields):
        super().__init__(child)
        self.child = child
        self.sort_fields = sort_fields

    def head_str(self):
        return f'({" ".join([escape_id(order + f) for (f, order) in self.sort_fields])})'

    def _eq(self, other):
        return self.sort_fields == other.sort_fields

    def _compute_type(self):
        self._type = hl.ttable(self.child.typ.global_type,
                               self.child.typ.row_type,
                               [])


class TableDistinct(TableIR):
    def __init__(self, child):
        super().__init__(child)
        self.child = child

    def _compute_type(self):
        self._type = self.child.typ


class RepartitionStrategy:
    SHUFFLE = 0
    COALESCE = 1
    NAIVE_COALESCE = 2


class TableRepartition(TableIR):
    def __init__(self, child, n, strategy):
        super().__init__(child)
        self.child = child
        self.n = n
        self.strategy = strategy

    def head_str(self):
        return f'{self.n} {self.strategy}'

    def _eq(self, other):
        return self.n == other.n and self.strategy == other.strategy

    def _compute_type(self):
        self._type = self.child.typ


class CastMatrixToTable(TableIR):
    def __init__(self, child, entries_field_name, cols_field_name):
        super().__init__(child)
        self.child = child
        self.entries_field_name = entries_field_name
        self.cols_field_name = cols_field_name

    def head_str(self):
        return f'"{escape_str(self.entries_field_name)}" "{escape_str(self.cols_field_name)}"'

    def _eq(self, other):
        return self.entries_field_name == other.entries_field_name \
            and self.cols_field_name == other.cols_field_name

    def _compute_type(self):
        child_typ = self.child.typ
        self._type = hl.ttable(child_typ.global_type._insert_field(self.cols_field_name, hl.tarray(child_typ.col_type)),
                               child_typ.row_type._insert_field(self.entries_field_name, hl.tarray(child_typ.entry_type)),
                               child_typ.row_key)


class TableRename(TableIR):
    def __init__(self, child, row_map, global_map):
        super().__init__(child)
        self.child = child
        self.row_map = row_map
        self.global_map = global_map

    def head_str(self):
        return f'{parsable_strings(self.row_map.keys())} ' \
               f'{parsable_strings(self.row_map.values())} ' \
               f'{parsable_strings(self.global_map.keys())} ' \
               f'{parsable_strings(self.global_map.values())} '

    def _eq(self, other):
        return self.row_map == other.row_map and self.global_map == other.global_map

    def _compute_type(self):
        self._type = self.child.typ._rename(self.global_map, self.row_map)


class TableMultiWayZipJoin(TableIR):
    def __init__(self, children, data_name, global_name):
        super().__init__(*children)
        self.children = children
        self.data_name = data_name
        self.global_name = global_name

    def head_str(self):
        return f'"{escape_str(self.data_name)}" "{escape_str(self.global_name)}"'

    def _eq(self, other):
        return self.data_name == other.data_name \
            and self.global_name == other.global_name

    def _compute_type(self):
        for c in self.children:
            c.typ  # force
        child_typ = self.children[0].typ
        self._type = hl.ttable(
            hl.tstruct(**{self.global_name: hl.tarray(child_typ.global_type)}),
            child_typ.key_type._insert_field(self.data_name, hl.tarray(child_typ.value_type)),
            child_typ.row_key)


class TableFilterIntervals(TableIR):
    def __init__(self, child, intervals, point_type, keep):
        super().__init__(child)
        self.child = child
        self.intervals = intervals
        self.point_type = point_type
        self.keep = keep

    def head_str(self):
        return f'{dump_json(hl.tarray(hl.tinterval(self.point_type))._convert_to_json(self.intervals))} {self.keep}'

    def _eq(self, other):
        return self.intervals == other.intervals \
            and self.point_type == other.point_type \
            and self.keep == other.keep

    def _compute_type(self):
        self._type = self.child.typ


class TableToTableApply(TableIR):
    def __init__(self, child, config):
        super().__init__(child)
        self.child = child
        self.config = config

    def head_str(self):
        return dump_json(self.config)

    def _eq(self, other):
        return self.config == other.config

    def _compute_type(self):
        name = self.config['name']
        if name == 'TableFilterPartitions':
            self._type = self.child.typ
        else:
            assert name in ('VEP', 'Nirvana'), name
            self._type = Env.backend().table_type(self)


def regression_test_type(test):
    glm_fit_schema = dtype('struct{n_iterations:int32,converged:bool,exploded:bool}')
    if test == 'wald':
        return dtype(
            f'struct{{beta:float64,standard_error:float64,z_stat:float64,p_value:float64,fit:{glm_fit_schema}}}')
    elif test == 'lrt':
        return dtype(f'struct{{beta:float64,chi_sq_stat:float64,p_value:float64,fit:{glm_fit_schema}}}')
    elif test == 'score':
        return dtype('struct{chi_sq_stat:float64,p_value:float64}')
    else:
        assert test == 'firth', test
        return dtype(f'struct{{beta:float64,chi_sq_stat:float64,p_value:float64,fit:{glm_fit_schema}}}')


class MatrixToTableApply(TableIR):
    def __init__(self, child, config):
        super().__init__(child)
        self.child = child
        self.config = config

    def head_str(self):
        return dump_json(self.config)

    def _eq(self, other):
        return self.config == other.config

    def _compute_type(self):
        name = self.config['name']
        child_typ = self.child.typ
        if name == 'LinearRegressionRowsChained':
            pass_through = self.config['passThrough']
            chained_schema = hl.dtype('struct{n:array<int32>,sum_x:array<float64>,y_transpose_x:array<array<float64>>,beta:array<array<float64>>,standard_error:array<array<float64>>,t_stat:array<array<float64>>,p_value:array<array<float64>>}')
            self._type = hl.ttable(
                child_typ.global_type,
                (child_typ.row_key_type
                 ._insert_fields(**{f: child_typ.row_type[f] for f in pass_through})
                 ._concat(chained_schema)),
                child_typ.row_key)
        elif name == 'LinearRegressionRowsSingle':
            pass_through = self.config['passThrough']
            chained_schema = hl.dtype('struct{n:int32,sum_x:float64,y_transpose_x:array<float64>,beta:array<float64>,standard_error:array<float64>,t_stat:array<float64>,p_value:array<float64>}')
            self._type = hl.ttable(
                child_typ.global_type,
                (child_typ.row_key_type
                 ._insert_fields(**{f: child_typ.row_type[f] for f in pass_through})
                 ._concat(chained_schema)),
                child_typ.row_key)
        elif name == 'LogisticRegression':
            pass_through = self.config['passThrough']
            logreg_type = hl.tstruct(logistic_regression=hl.tarray(regression_test_type(self.config['test'])))
            self._type = hl.ttable(
                child_typ.global_type,
                (child_typ.row_key_type
                 ._insert_fields(**{f: child_typ.row_type[f] for f in pass_through})
                 ._concat(logreg_type)),
                child_typ.row_key)
        elif name == 'PoissonRegression':
            pass_through = self.config['passThrough']
            poisreg_type = regression_test_type(self.config['test'])
            self._type = hl.ttable(
                child_typ.global_type,
                (child_typ.row_key_type
                 ._insert_fields(**{f: child_typ.row_type[f] for f in pass_through})
                 ._concat(poisreg_type)),
                child_typ.row_key)
        elif name == 'Skat':
            key_field = self.config['keyField']
            key_type = child_typ.row_type[key_field]
            skat_type = hl.dtype(f'struct{{id:{key_type},size:int32,q_stat:float64,p_value:float64,fault:int32}}')
            self._type = hl.ttable(
                hl.tstruct(),
                skat_type,
                ['id'])
        elif name == 'PCA':
            self._type = hl.ttable(
                hl.tstruct(eigenvalues=hl.tarray(hl.tfloat64),
                           scores=hl.tarray(child_typ.col_key_type._insert_field('scores', hl.tarray(hl.tfloat64)))),
                child_typ.row_key_type._insert_field('loadings', dtype('array<float64>')),
                child_typ.row_key)
        elif name == 'IBD':
            ibd_info_type = hl.tstruct(Z0=hl.tfloat64, Z1=hl.tfloat64, Z2=hl.tfloat64, PI_HAT=hl.tfloat64)
            ibd_type = hl.tstruct(i=hl.tstr,
                                  j=hl.tstr,
                                  ibd=ibd_info_type,
                                  ibs0=hl.tint64,
                                  ibs1=hl.tint64,
                                  ibs2=hl.tint64)
            self._type = hl.ttable(
                hl.tstruct(),
                ibd_type,
                ['i', 'j'])
        else:
            assert name == 'LocalLDPrune', name
            self._type = hl.ttable(
                hl.tstruct(),
                child_typ.row_key_type._insert_fields(mean=hl.tfloat64, centered_length_rec=hl.tfloat64),
                list(child_typ.row_key))


class BlockMatrixToTableApply(TableIR):
    def __init__(self, bm, aux, config):
        super().__init__(bm, aux)
        self.bm = bm
        self.aux = aux
        self.config = config

    def head_str(self):
        return dump_json(self.config)

    def _eq(self, other):
        return self.config == other.config

    def _compute_type(self):
        name = self.config['name']
        assert name == 'PCRelate', name
        self._type = hl.ttable(
            hl.tstruct(),
            hl.tstruct(i=hl.tint32, j=hl.tint32,
                       kin=hl.tfloat64,
                       ibd0=hl.tfloat64,
                       ibd1=hl.tfloat64,
                       ibd2=hl.tfloat64),
            ['i', 'j'])


class BlockMatrixToTable(TableIR):
    def __init__(self, child):
        super().__init__(child)
        self.child = child

    def _compute_type(self):
        self._type = hl.ttable(
            hl.tstruct(),
            hl.tstruct(**{'i': hl.tint64, 'j': hl.tint64, 'entry': hl.tfloat64}),
            [])


class JavaTable(TableIR):
    def __init__(self, jir):
        super().__init__()
        self._jir = jir

    def render_head(self, r):
        return f'(JavaTable {r.add_jir(self._jir)}'

    def _compute_type(self):
        self._type = hl.ttable._from_java(self._jir.typ())
mit
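The IR nodes above all follow one pattern: a node holds its children, head_str serializes the node-specific arguments, _eq compares them, and _compute_type derives the node's table type from its children (often unchanged, as in TableHead, TableTail, and TableDistinct). A minimal sketch of that pattern, using a hypothetical TinyIR base in place of Hail's real TableIR (whose definition is not shown in this record):

class TinyIR:
    """Hypothetical stand-in for the TableIR base class."""
    def __init__(self, *children):
        self.children = children
        self._type = None

    @property
    def typ(self):
        # Types are computed lazily and cached, like the nodes above.
        if self._type is None:
            self._compute_type()
        return self._type


class TinyLeaf(TinyIR):
    def __init__(self, typ_name):
        super().__init__()
        self._leaf_typ = typ_name

    def _compute_type(self):
        self._type = self._leaf_typ


class TinyHead(TinyIR):
    def __init__(self, child, n):
        super().__init__(child)
        self.child = child
        self.n = n

    def head_str(self):
        return self.n

    def _compute_type(self):
        # Taking the first n rows does not change the table's type.
        self._type = self.child.typ


node = TinyHead(TinyLeaf('table<row: int32>'), 10)
print(node.typ)         # 'table<row: int32>' -- same type as the child
print(node.head_str())  # 10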
grnet/snf-image-creator
image_creator/distro/unsupported.py
2
1326
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2018 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

"""This module hosts code to handle unknown OSes."""

from image_creator.distro import OSBase


class Unsupported(OSBase):
    """OS class for unsupported OSes"""
    def __init__(self, image, **kwargs):
        super(Unsupported, self).__init__(image, **kwargs)

    def collect_metadata(self):
        """Collect metadata about the OS"""
        self.out.warn("Unable to collect metadata for unsupported media")

    def _do_mount(self, readonly):
        """Mount partitions in correct order"""
        self._mount_error = "not supported for this media"
        return False

# vim: set sta sts=4 shiftwidth=4 sw=4 et ai :
gpl-3.0
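Unsupported is deliberately inert: collect_metadata only warns, and _do_mount records a reason and returns False instead of raising, so callers can report why the media could not be mounted. A minimal sketch of that contract with a stand-in class (MediaStub is illustrative and not part of snf-image-creator):

class MediaStub:
    """Illustrative stand-in mirroring the _do_mount failure contract."""
    def __init__(self):
        self._mount_error = None

    def _do_mount(self, readonly):
        # Record why mounting failed and signal failure via the return value.
        self._mount_error = "not supported for this media"
        return False


m = MediaStub()
if not m._do_mount(readonly=True):
    print("mount failed:", m._mount_error)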
pymedusa/Medusa
ext/github/Project.py
2
7975
# -*- coding: utf-8 -*-

############################ Copyrights and license ############################
#                                                                              #
# Copyright 2018 bbi-yggy <yossarian@blackbirdinteractive.com>                 #
#                                                                              #
# This file is part of PyGithub.                                               #
# http://pygithub.readthedocs.io/                                              #
#                                                                              #
# PyGithub is free software: you can redistribute it and/or modify it under   #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option)   #
# any later version.                                                           #
#                                                                              #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS   #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for     #
# more details.                                                                #
#                                                                              #
# You should have received a copy of the GNU Lesser General Public License    #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>.             #
#                                                                              #
################################################################################

from __future__ import absolute_import

import six

import github.GithubObject
import github.ProjectColumn

from . import Consts


class Project(github.GithubObject.CompletableGithubObject):
    """
    This class represents Projects. The reference can be found here http://developer.github.com/v3/projects
    """

    def __repr__(self):
        return self.get__repr__({"name": self._name.value})

    @property
    def body(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._body)
        return self._body.value

    @property
    def columns_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._columns_url)
        return self._columns_url.value

    @property
    def created_at(self):
        """
        :type: datetime.datetime
        """
        self._completeIfNotSet(self._created_at)
        return self._created_at.value

    @property
    def creator(self):
        """
        :type: :class:`github.NamedUser.NamedUser`
        """
        self._completeIfNotSet(self._creator)
        return self._creator.value

    @property
    def html_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._html_url)
        return self._html_url.value

    @property
    def id(self):
        """
        :type: integer
        """
        self._completeIfNotSet(self._id)
        return self._id.value

    @property
    def name(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._name)
        return self._name.value

    @property
    def node_id(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._node_id)
        return self._node_id.value

    @property
    def number(self):
        """
        :type: integer
        """
        self._completeIfNotSet(self._number)
        return self._number.value

    @property
    def owner_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._owner_url)
        return self._owner_url.value

    @property
    def state(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._state)
        return self._state.value

    @property
    def updated_at(self):
        """
        :type: datetime.datetime
        """
        self._completeIfNotSet(self._updated_at)
        return self._updated_at.value

    @property
    def url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._url)
        return self._url.value

    def get_columns(self):
        """
        :calls: `GET /projects/:project_id/columns <https://developer.github.com/v3/projects/columns/#list-project-columns>`_
        :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.ProjectColumn.ProjectColumn`
        """
        return github.PaginatedList.PaginatedList(
            github.ProjectColumn.ProjectColumn,
            self._requester,
            self.columns_url,
            None,
            {"Accept": Consts.mediaTypeProjectsPreview},
        )

    def create_column(self, name):
        """
        :calls: `POST /projects/:project_id/columns <https://developer.github.com/v3/projects/columns/#create-a-project-column>`_
        :param name: string
        """
        assert isinstance(name, (str, six.text_type)), name
        post_parameters = {"name": name}
        import_header = {"Accept": Consts.mediaTypeProjectsPreview}
        headers, data = self._requester.requestJsonAndCheck(
            "POST", self.url + "/columns", headers=import_header, input=post_parameters
        )
        return github.ProjectColumn.ProjectColumn(
            self._requester, headers, data, completed=True
        )

    def _initAttributes(self):
        self._body = github.GithubObject.NotSet
        self._columns_url = github.GithubObject.NotSet
        self._created_at = github.GithubObject.NotSet
        self._creator = github.GithubObject.NotSet
        self._html_url = github.GithubObject.NotSet
        self._id = github.GithubObject.NotSet
        self._name = github.GithubObject.NotSet
        self._node_id = github.GithubObject.NotSet
        self._number = github.GithubObject.NotSet
        self._owner_url = github.GithubObject.NotSet
        self._state = github.GithubObject.NotSet
        self._updated_at = github.GithubObject.NotSet
        self._url = github.GithubObject.NotSet

    def _useAttributes(self, attributes):
        if "body" in attributes:  # pragma no branch
            self._body = self._makeStringAttribute(attributes["body"])
        if "columns_url" in attributes:  # pragma no branch
            self._columns_url = self._makeStringAttribute(attributes["columns_url"])
        if "created_at" in attributes:  # pragma no branch
            self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
        if "creator" in attributes:  # pragma no branch
            self._creator = self._makeClassAttribute(
                github.NamedUser.NamedUser, attributes["creator"]
            )
        if "html_url" in attributes:  # pragma no branch
            self._html_url = self._makeStringAttribute(attributes["html_url"])
        if "id" in attributes:  # pragma no branch
            self._id = self._makeIntAttribute(attributes["id"])
        if "name" in attributes:  # pragma no branch
            self._name = self._makeStringAttribute(attributes["name"])
        if "node_id" in attributes:  # pragma no branch
            self._node_id = self._makeStringAttribute(attributes["node_id"])
        if "number" in attributes:  # pragma no branch
            self._number = self._makeIntAttribute(attributes["number"])
        if "owner_url" in attributes:  # pragma no branch
            self._owner_url = self._makeStringAttribute(attributes["owner_url"])
        if "state" in attributes:  # pragma no branch
            self._state = self._makeStringAttribute(attributes["state"])
        if "updated_at" in attributes:  # pragma no branch
            self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
        if "url" in attributes:  # pragma no branch
            self._url = self._makeStringAttribute(attributes["url"])
gpl-3.0
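Project is a completable object: each property calls _completeIfNotSet, which lazily fetches the full resource from the API the first time an unset attribute is read. A hedged usage sketch with PyGithub's top-level client (the token and repository name are placeholders, and repo.get_projects() is assumed here to return these Project instances):

from github import Github

gh = Github("ACCESS_TOKEN")        # placeholder token
repo = gh.get_repo("owner/repo")   # placeholder repository

projects = list(repo.get_projects())
if projects:
    project = projects[0]
    # Reading .name/.state may trigger a lazy fetch via _completeIfNotSet.
    print(project.name, project.state)
    # Issues POST <project.url>/columns with the projects preview media type.
    column = project.create_column("To do")
    print(column.name)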
jordotech/sherri_satchmo
satchmo/apps/satchmo_store/shop/listeners.py
4
3503
from livesettings import config_value
from payment.listeners import capture_on_ship_listener
from product.models import Product
from product.listeners import default_product_search_listener, discount_used_listener
from satchmo_store.contact import signals as contact_signals
from satchmo_store.mail import send_html_email
from satchmo_store.shop import signals
from satchmo_store.shop.exceptions import OutOfStockError
from satchmo_store.shop.models import Order
from satchmo_utils.signals import application_search
import notification
import logging

log = logging.getLogger('shop.listeners')

# --------------- Optional listeners

def only_one_item_in_cart(sender, cart=None, cartitem=None, **kwargs):
    for item in cart.cartitem_set.all():
        if not item == cartitem:
            log.debug('only one item in cart active: removing %s', item)
            item.delete()

# --------------- Default listeners

def decrease_inventory_on_sale(sender, order=None, **kwargs):
    """Track inventory and total sold."""
    # Added to track total sold for each product
    for item in order.orderitem_set.all():
        product = item.product
        product.total_sold += item.quantity
        if config_value('PRODUCT','TRACK_INVENTORY'):
            product.items_in_stock -= item.quantity
        product.save()

def recalc_total_on_contact_change(contact=None, **kwargs):
    """If the contact has changed, recalculate the order total to ensure all current triggers are hit."""
    #TODO: pull just the current order once we start using threadlocal middleware
    log.debug("Recalculating all contact orders not in process")
    orders = Order.objects.filter(contact=contact, status="")
    log.debug("Found %i orders to recalc", orders.count())
    for order in orders:
        order.copy_addresses()
        order.recalculate_total()

def remove_order_on_cart_update(request=None, cart=None, **kwargs):
    """Remove partially completed order when the cart is updated"""
    if request:
        log.debug("caught cart changed signal - remove_order_on_cart_update")
        Order.objects.remove_partial_order(request)

def veto_out_of_stock(sender, cartitem=None, added_quantity=0, **kwargs):
    """Listener which vetoes adding products to the cart which are out of stock."""
    if config_value('PRODUCT','NO_STOCK_CHECKOUT') == False:
        product = cartitem.product
        need_qty = cartitem.quantity + added_quantity
        if product.items_in_stock < need_qty:
            log.debug('out of stock on %s', product.slug)
            raise OutOfStockError(product, product.items_in_stock, need_qty)

def start_default_listening():
    """Add required default listeners"""
    contact_signals.satchmo_contact_location_changed.connect(recalc_total_on_contact_change, sender=None)
    signals.order_success.connect(decrease_inventory_on_sale)
    signals.order_success.connect(notification.order_success_listener, sender=None)
    signals.order_success.connect(discount_used_listener, sender=None)
    signals.satchmo_cart_changed.connect(remove_order_on_cart_update, sender=None)
    application_search.connect(default_product_search_listener, sender=Product)
    signals.satchmo_order_status_changed.connect(capture_on_ship_listener)
    signals.satchmo_order_status_changed.connect(notification.notify_on_ship_listener)
    signals.satchmo_cart_add_verify.connect(veto_out_of_stock)
    log.debug('Added default shop listeners')
bsd-3-clause
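start_default_listening wires plain functions to Django signals; any handler with a (sender, **kwargs) signature can be attached the same way. A minimal self-contained sketch of that pattern (the signal below is a stand-in for illustration, not satchmo's actual order_success):

import logging

from django.dispatch import Signal

log = logging.getLogger('shop.listeners')

order_success = Signal()  # stand-in signal, mirroring satchmo's naming


def log_order_success(sender, order=None, **kwargs):
    # Same (sender, **kwargs) shape as the satchmo listeners above.
    log.info('order %s completed', order)


order_success.connect(log_order_success)
order_success.send(sender=None, order='#1001')  # invokes log_order_success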
neerajvashistha/pa-dude
lib/python2.7/site-packages/numpy/core/tests/test_numeric.py
18
83788
from __future__ import division, absolute_import, print_function import sys import warnings import itertools import platform from decimal import Decimal import numpy as np from numpy.core import umath from numpy.random import rand, randint, randn from numpy.testing import ( TestCase, run_module_suite, assert_, assert_equal, assert_raises, assert_array_equal, assert_almost_equal, assert_array_almost_equal, dec ) class TestResize(TestCase): def test_copies(self): A = np.array([[1, 2], [3, 4]]) Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]]) assert_equal(np.resize(A, (2, 4)), Ar1) Ar2 = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) assert_equal(np.resize(A, (4, 2)), Ar2) Ar3 = np.array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]]) assert_equal(np.resize(A, (4, 3)), Ar3) def test_zeroresize(self): A = np.array([[1, 2], [3, 4]]) Ar = np.resize(A, (0,)) assert_equal(Ar, np.array([])) class TestNonarrayArgs(TestCase): # check that non-array arguments to functions wrap them in arrays def test_squeeze(self): A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]] assert_(np.squeeze(A).shape == (3, 3)) def test_cumproduct(self): A = [[1, 2, 3], [4, 5, 6]] assert_(np.all(np.cumproduct(A) == np.array([1, 2, 6, 24, 120, 720]))) def test_size(self): A = [[1, 2, 3], [4, 5, 6]] assert_(np.size(A) == 6) assert_(np.size(A, 0) == 2) assert_(np.size(A, 1) == 3) def test_mean(self): A = [[1, 2, 3], [4, 5, 6]] assert_(np.mean(A) == 3.5) assert_(np.all(np.mean(A, 0) == np.array([2.5, 3.5, 4.5]))) assert_(np.all(np.mean(A, 1) == np.array([2., 5.]))) with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', RuntimeWarning) assert_(np.isnan(np.mean([]))) assert_(w[0].category is RuntimeWarning) def test_std(self): A = [[1, 2, 3], [4, 5, 6]] assert_almost_equal(np.std(A), 1.707825127659933) assert_almost_equal(np.std(A, 0), np.array([1.5, 1.5, 1.5])) assert_almost_equal(np.std(A, 1), np.array([0.81649658, 0.81649658])) with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', RuntimeWarning) assert_(np.isnan(np.std([]))) assert_(w[0].category is RuntimeWarning) def test_var(self): A = [[1, 2, 3], [4, 5, 6]] assert_almost_equal(np.var(A), 2.9166666666666665) assert_almost_equal(np.var(A, 0), np.array([2.25, 2.25, 2.25])) assert_almost_equal(np.var(A, 1), np.array([0.66666667, 0.66666667])) with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', RuntimeWarning) assert_(np.isnan(np.var([]))) assert_(w[0].category is RuntimeWarning) class TestBoolScalar(TestCase): def test_logical(self): f = np.False_ t = np.True_ s = "xyz" self.assertTrue((t and s) is s) self.assertTrue((f and s) is f) def test_bitwise_or(self): f = np.False_ t = np.True_ self.assertTrue((t | t) is t) self.assertTrue((f | t) is t) self.assertTrue((t | f) is t) self.assertTrue((f | f) is f) def test_bitwise_and(self): f = np.False_ t = np.True_ self.assertTrue((t & t) is t) self.assertTrue((f & t) is f) self.assertTrue((t & f) is f) self.assertTrue((f & f) is f) def test_bitwise_xor(self): f = np.False_ t = np.True_ self.assertTrue((t ^ t) is f) self.assertTrue((f ^ t) is t) self.assertTrue((t ^ f) is t) self.assertTrue((f ^ f) is f) class TestBoolArray(TestCase): def setUp(self): # offset for simd tests self.t = np.array([True] * 41, dtype=np.bool)[1::] self.f = np.array([False] * 41, dtype=np.bool)[1::] self.o = np.array([False] * 42, dtype=np.bool)[2::] self.nm = self.f.copy() self.im = self.t.copy() self.nm[3] = True self.nm[-2] = True self.im[3] = False self.im[-2] = False def 
test_all_any(self): self.assertTrue(self.t.all()) self.assertTrue(self.t.any()) self.assertFalse(self.f.all()) self.assertFalse(self.f.any()) self.assertTrue(self.nm.any()) self.assertTrue(self.im.any()) self.assertFalse(self.nm.all()) self.assertFalse(self.im.all()) # check bad element in all positions for i in range(256 - 7): d = np.array([False] * 256, dtype=np.bool)[7::] d[i] = True self.assertTrue(np.any(d)) e = np.array([True] * 256, dtype=np.bool)[7::] e[i] = False self.assertFalse(np.all(e)) assert_array_equal(e, ~d) # big array test for blocked libc loops for i in list(range(9, 6000, 507)) + [7764, 90021, -10]: d = np.array([False] * 100043, dtype=np.bool) d[i] = True self.assertTrue(np.any(d), msg="%r" % i) e = np.array([True] * 100043, dtype=np.bool) e[i] = False self.assertFalse(np.all(e), msg="%r" % i) def test_logical_not_abs(self): assert_array_equal(~self.t, self.f) assert_array_equal(np.abs(~self.t), self.f) assert_array_equal(np.abs(~self.f), self.t) assert_array_equal(np.abs(self.f), self.f) assert_array_equal(~np.abs(self.f), self.t) assert_array_equal(~np.abs(self.t), self.f) assert_array_equal(np.abs(~self.nm), self.im) np.logical_not(self.t, out=self.o) assert_array_equal(self.o, self.f) np.abs(self.t, out=self.o) assert_array_equal(self.o, self.t) def test_logical_and_or_xor(self): assert_array_equal(self.t | self.t, self.t) assert_array_equal(self.f | self.f, self.f) assert_array_equal(self.t | self.f, self.t) assert_array_equal(self.f | self.t, self.t) np.logical_or(self.t, self.t, out=self.o) assert_array_equal(self.o, self.t) assert_array_equal(self.t & self.t, self.t) assert_array_equal(self.f & self.f, self.f) assert_array_equal(self.t & self.f, self.f) assert_array_equal(self.f & self.t, self.f) np.logical_and(self.t, self.t, out=self.o) assert_array_equal(self.o, self.t) assert_array_equal(self.t ^ self.t, self.f) assert_array_equal(self.f ^ self.f, self.f) assert_array_equal(self.t ^ self.f, self.t) assert_array_equal(self.f ^ self.t, self.t) np.logical_xor(self.t, self.t, out=self.o) assert_array_equal(self.o, self.f) assert_array_equal(self.nm & self.t, self.nm) assert_array_equal(self.im & self.f, False) assert_array_equal(self.nm & True, self.nm) assert_array_equal(self.im & False, self.f) assert_array_equal(self.nm | self.t, self.t) assert_array_equal(self.im | self.f, self.im) assert_array_equal(self.nm | True, self.t) assert_array_equal(self.im | False, self.im) assert_array_equal(self.nm ^ self.t, self.im) assert_array_equal(self.im ^ self.f, self.im) assert_array_equal(self.nm ^ True, self.im) assert_array_equal(self.im ^ False, self.im) class TestBoolCmp(TestCase): def setUp(self): self.f = np.ones(256, dtype=np.float32) self.ef = np.ones(self.f.size, dtype=np.bool) self.d = np.ones(128, dtype=np.float64) self.ed = np.ones(self.d.size, dtype=np.bool) # generate values for all permutation of 256bit simd vectors s = 0 for i in range(32): self.f[s:s+8] = [i & 2**x for x in range(8)] self.ef[s:s+8] = [(i & 2**x) != 0 for x in range(8)] s += 8 s = 0 for i in range(16): self.d[s:s+4] = [i & 2**x for x in range(4)] self.ed[s:s+4] = [(i & 2**x) != 0 for x in range(4)] s += 4 self.nf = self.f.copy() self.nd = self.d.copy() self.nf[self.ef] = np.nan self.nd[self.ed] = np.nan def test_float(self): # offset for alignment test for i in range(4): assert_array_equal(self.f[i:] > 0, self.ef[i:]) assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:]) assert_array_equal(self.f[i:] == 0, ~self.ef[i:]) assert_array_equal(-self.f[i:] < 0, self.ef[i:]) 
assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:]) r = self.f[i:] != 0 assert_array_equal(r, self.ef[i:]) r2 = self.f[i:] != np.zeros_like(self.f[i:]) r3 = 0 != self.f[i:] assert_array_equal(r, r2) assert_array_equal(r, r3) # check bool == 0x1 assert_array_equal(r.view(np.int8), r.astype(np.int8)) assert_array_equal(r2.view(np.int8), r2.astype(np.int8)) assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) # isnan on amd64 takes the same codepath assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:]) def test_double(self): # offset for alignment test for i in range(2): assert_array_equal(self.d[i:] > 0, self.ed[i:]) assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:]) assert_array_equal(self.d[i:] == 0, ~self.ed[i:]) assert_array_equal(-self.d[i:] < 0, self.ed[i:]) assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:]) r = self.d[i:] != 0 assert_array_equal(r, self.ed[i:]) r2 = self.d[i:] != np.zeros_like(self.d[i:]) r3 = 0 != self.d[i:] assert_array_equal(r, r2) assert_array_equal(r, r3) # check bool == 0x1 assert_array_equal(r.view(np.int8), r.astype(np.int8)) assert_array_equal(r2.view(np.int8), r2.astype(np.int8)) assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) # isnan on amd64 takes the same codepath assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:]) class TestSeterr(TestCase): def test_default(self): err = np.geterr() self.assertEqual(err, dict( divide='warn', invalid='warn', over='warn', under='ignore', )) def test_set(self): with np.errstate(): err = np.seterr() old = np.seterr(divide='print') self.assertTrue(err == old) new = np.seterr() self.assertTrue(new['divide'] == 'print') np.seterr(over='raise') self.assertTrue(np.geterr()['over'] == 'raise') self.assertTrue(new['divide'] == 'print') np.seterr(**old) self.assertTrue(np.geterr() == old) @dec.skipif(platform.machine() == "armv5tel", "See gh-413.") def test_divide_err(self): with np.errstate(divide='raise'): try: np.array([1.]) / np.array([0.]) except FloatingPointError: pass else: self.fail() np.seterr(divide='ignore') np.array([1.]) / np.array([0.]) def test_errobj(self): olderrobj = np.geterrobj() self.called = 0 try: with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") with np.errstate(divide='warn'): np.seterrobj([20000, 1, None]) np.array([1.]) / np.array([0.]) self.assertEqual(len(w), 1) def log_err(*args): self.called += 1 extobj_err = args assert (len(extobj_err) == 2) assert ("divide" in extobj_err[0]) with np.errstate(divide='ignore'): np.seterrobj([20000, 3, log_err]) np.array([1.]) / np.array([0.]) self.assertEqual(self.called, 1) np.seterrobj(olderrobj) with np.errstate(divide='ignore'): np.divide(1., 0., extobj=[20000, 3, log_err]) self.assertEqual(self.called, 2) finally: np.seterrobj(olderrobj) del self.called def test_errobj_noerrmask(self): # errmask = 0 has a special code path for the default olderrobj = np.geterrobj() try: # set errobj to something non default np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, umath.ERR_DEFAULT + 1, None]) #call a ufunc np.isnan(np.array([6])) # same with the default, lots of times to get rid of possible # pre-existing stack in the code for i in range(10000): np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, umath.ERR_DEFAULT, None]) np.isnan(np.array([6])) finally: np.seterrobj(olderrobj) class TestFloatExceptions(TestCase): def assert_raises_fpe(self, fpeerr, flop, x, y): ftype = type(x) try: flop(x, y) assert_(False, "Type %s did not raise fpe error '%s'." 
% (ftype, fpeerr)) except FloatingPointError as exc: assert_(str(exc).find(fpeerr) >= 0, "Type %s raised wrong fpe error '%s'." % (ftype, exc)) def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2): # Check that fpe exception is raised. # # Given a floating operation `flop` and two scalar values, check that # the operation raises the floating point exception specified by #`fpeerr`. Tests all variants with 0-d array scalars as well. self.assert_raises_fpe(fpeerr, flop, sc1, sc2) self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2) self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()]) self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()]) @dec.knownfailureif(True, "See ticket #2350") def test_floating_exceptions(self): # Test basic arithmetic function errors with np.errstate(all='raise'): # Test for all real and complex float types for typecode in np.typecodes['AllFloat']: ftype = np.obj2sctype(typecode) if np.dtype(ftype).kind == 'f': # Get some extreme values for the type fi = np.finfo(ftype) ft_tiny = fi.tiny ft_max = fi.max ft_eps = fi.eps underflow = 'underflow' divbyzero = 'divide by zero' else: # 'c', complex, corresponding real dtype rtype = type(ftype(0).real) fi = np.finfo(rtype) ft_tiny = ftype(fi.tiny) ft_max = ftype(fi.max) ft_eps = ftype(fi.eps) # The complex types raise different exceptions underflow = '' divbyzero = '' overflow = 'overflow' invalid = 'invalid' self.assert_raises_fpe(underflow, lambda a, b:a/b, ft_tiny, ft_max) self.assert_raises_fpe(underflow, lambda a, b:a*b, ft_tiny, ft_tiny) self.assert_raises_fpe(overflow, lambda a, b:a*b, ft_max, ftype(2)) self.assert_raises_fpe(overflow, lambda a, b:a/b, ft_max, ftype(0.5)) self.assert_raises_fpe(overflow, lambda a, b:a+b, ft_max, ft_max*ft_eps) self.assert_raises_fpe(overflow, lambda a, b:a-b, -ft_max, ft_max*ft_eps) self.assert_raises_fpe(overflow, np.power, ftype(2), ftype(2**fi.nexp)) self.assert_raises_fpe(divbyzero, lambda a, b:a/b, ftype(1), ftype(0)) self.assert_raises_fpe(invalid, lambda a, b:a/b, ftype(np.inf), ftype(np.inf)) self.assert_raises_fpe(invalid, lambda a, b:a/b, ftype(0), ftype(0)) self.assert_raises_fpe(invalid, lambda a, b:a-b, ftype(np.inf), ftype(np.inf)) self.assert_raises_fpe(invalid, lambda a, b:a+b, ftype(np.inf), ftype(-np.inf)) self.assert_raises_fpe(invalid, lambda a, b:a*b, ftype(0), ftype(np.inf)) def test_warnings(self): # test warning code path with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") with np.errstate(all="warn"): np.divide(1, 0.) self.assertEqual(len(w), 1) self.assertTrue("divide by zero" in str(w[0].message)) np.array(1e300) * np.array(1e300) self.assertEqual(len(w), 2) self.assertTrue("overflow" in str(w[-1].message)) np.array(np.inf) - np.array(np.inf) self.assertEqual(len(w), 3) self.assertTrue("invalid value" in str(w[-1].message)) np.array(1e-300) * np.array(1e-300) self.assertEqual(len(w), 4) self.assertTrue("underflow" in str(w[-1].message)) class TestTypes(TestCase): def check_promotion_cases(self, promote_func): #Tests that the scalars get coerced correctly. 
b = np.bool_(0) i8, i16, i32, i64 = np.int8(0), np.int16(0), np.int32(0), np.int64(0) u8, u16, u32, u64 = np.uint8(0), np.uint16(0), np.uint32(0), np.uint64(0) f32, f64, fld = np.float32(0), np.float64(0), np.longdouble(0) c64, c128, cld = np.complex64(0), np.complex128(0), np.clongdouble(0) # coercion within the same kind assert_equal(promote_func(i8, i16), np.dtype(np.int16)) assert_equal(promote_func(i32, i8), np.dtype(np.int32)) assert_equal(promote_func(i16, i64), np.dtype(np.int64)) assert_equal(promote_func(u8, u32), np.dtype(np.uint32)) assert_equal(promote_func(f32, f64), np.dtype(np.float64)) assert_equal(promote_func(fld, f32), np.dtype(np.longdouble)) assert_equal(promote_func(f64, fld), np.dtype(np.longdouble)) assert_equal(promote_func(c128, c64), np.dtype(np.complex128)) assert_equal(promote_func(cld, c128), np.dtype(np.clongdouble)) assert_equal(promote_func(c64, fld), np.dtype(np.clongdouble)) # coercion between kinds assert_equal(promote_func(b, i32), np.dtype(np.int32)) assert_equal(promote_func(b, u8), np.dtype(np.uint8)) assert_equal(promote_func(i8, u8), np.dtype(np.int16)) assert_equal(promote_func(u8, i32), np.dtype(np.int32)) assert_equal(promote_func(i64, u32), np.dtype(np.int64)) assert_equal(promote_func(u64, i32), np.dtype(np.float64)) assert_equal(promote_func(i32, f32), np.dtype(np.float64)) assert_equal(promote_func(i64, f32), np.dtype(np.float64)) assert_equal(promote_func(f32, i16), np.dtype(np.float32)) assert_equal(promote_func(f32, u32), np.dtype(np.float64)) assert_equal(promote_func(f32, c64), np.dtype(np.complex64)) assert_equal(promote_func(c128, f32), np.dtype(np.complex128)) assert_equal(promote_func(cld, f64), np.dtype(np.clongdouble)) # coercion between scalars and 1-D arrays assert_equal(promote_func(np.array([b]), i8), np.dtype(np.int8)) assert_equal(promote_func(np.array([b]), u8), np.dtype(np.uint8)) assert_equal(promote_func(np.array([b]), i32), np.dtype(np.int32)) assert_equal(promote_func(np.array([b]), u32), np.dtype(np.uint32)) assert_equal(promote_func(np.array([i8]), i64), np.dtype(np.int8)) assert_equal(promote_func(u64, np.array([i32])), np.dtype(np.int32)) assert_equal(promote_func(i64, np.array([u32])), np.dtype(np.uint32)) assert_equal(promote_func(np.int32(-1), np.array([u64])), np.dtype(np.float64)) assert_equal(promote_func(f64, np.array([f32])), np.dtype(np.float32)) assert_equal(promote_func(fld, np.array([f32])), np.dtype(np.float32)) assert_equal(promote_func(np.array([f64]), fld), np.dtype(np.float64)) assert_equal(promote_func(fld, np.array([c64])), np.dtype(np.complex64)) assert_equal(promote_func(c64, np.array([f64])), np.dtype(np.complex128)) assert_equal(promote_func(np.complex64(3j), np.array([f64])), np.dtype(np.complex128)) # coercion between scalars and 1-D arrays, where # the scalar has greater kind than the array assert_equal(promote_func(np.array([b]), f64), np.dtype(np.float64)) assert_equal(promote_func(np.array([b]), i64), np.dtype(np.int64)) assert_equal(promote_func(np.array([b]), u64), np.dtype(np.uint64)) assert_equal(promote_func(np.array([i8]), f64), np.dtype(np.float64)) assert_equal(promote_func(np.array([u16]), f64), np.dtype(np.float64)) # uint and int are treated as the same "kind" for # the purposes of array-scalar promotion. assert_equal(promote_func(np.array([u16]), i32), np.dtype(np.uint16)) # float and complex are treated as the same "kind" for # the purposes of array-scalar promotion, so that you can do # (0j + float32array) to get a complex64 array instead of # a complex128 array. 
assert_equal(promote_func(np.array([f32]), c128), np.dtype(np.complex64)) def test_coercion(self): def res_type(a, b): return np.add(a, b).dtype self.check_promotion_cases(res_type) # Use-case: float/complex scalar * bool/int8 array # shouldn't narrow the float/complex type for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]: b = 1.234 * a assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype) b = np.longdouble(1.234) * a assert_equal(b.dtype, np.dtype(np.longdouble), "array type %s" % a.dtype) b = np.float64(1.234) * a assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype) b = np.float32(1.234) * a assert_equal(b.dtype, np.dtype('f4'), "array type %s" % a.dtype) b = np.float16(1.234) * a assert_equal(b.dtype, np.dtype('f2'), "array type %s" % a.dtype) b = 1.234j * a assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype) b = np.clongdouble(1.234j) * a assert_equal(b.dtype, np.dtype(np.clongdouble), "array type %s" % a.dtype) b = np.complex128(1.234j) * a assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype) b = np.complex64(1.234j) * a assert_equal(b.dtype, np.dtype('c8'), "array type %s" % a.dtype) # The following use-case is problematic, and to resolve its # tricky side-effects requires more changes. # ## Use-case: (1-t)*a, where 't' is a boolean array and 'a' is ## a float32, shouldn't promote to float64 #a = np.array([1.0, 1.5], dtype=np.float32) #t = np.array([True, False]) #b = t*a #assert_equal(b, [1.0, 0.0]) #assert_equal(b.dtype, np.dtype('f4')) #b = (1-t)*a #assert_equal(b, [0.0, 1.5]) #assert_equal(b.dtype, np.dtype('f4')) ## Probably ~t (bitwise negation) is more proper to use here, ## but this is arguably less intuitive to understand at a glance, and ## would fail if 't' is actually an integer array instead of boolean: #b = (~t)*a #assert_equal(b, [0.0, 1.5]) #assert_equal(b.dtype, np.dtype('f4')) def test_result_type(self): self.check_promotion_cases(np.result_type) assert_(np.result_type(None) == np.dtype(None)) def test_promote_types_endian(self): # promote_types should always return native-endian types assert_equal(np.promote_types('<i8', '<i8'), np.dtype('i8')) assert_equal(np.promote_types('>i8', '>i8'), np.dtype('i8')) assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21')) assert_equal(np.promote_types('<i8', '<U16'), np.dtype('U21')) assert_equal(np.promote_types('>U16', '>i8'), np.dtype('U21')) assert_equal(np.promote_types('<U16', '<i8'), np.dtype('U21')) assert_equal(np.promote_types('<S5', '<U8'), np.dtype('U8')) assert_equal(np.promote_types('>S5', '>U8'), np.dtype('U8')) assert_equal(np.promote_types('<U8', '<S5'), np.dtype('U8')) assert_equal(np.promote_types('>U8', '>S5'), np.dtype('U8')) assert_equal(np.promote_types('<U5', '<U8'), np.dtype('U8')) assert_equal(np.promote_types('>U8', '>U5'), np.dtype('U8')) assert_equal(np.promote_types('<M8', '<M8'), np.dtype('M8')) assert_equal(np.promote_types('>M8', '>M8'), np.dtype('M8')) assert_equal(np.promote_types('<m8', '<m8'), np.dtype('m8')) assert_equal(np.promote_types('>m8', '>m8'), np.dtype('m8')) def test_promote_types_strings(self): assert_equal(np.promote_types('bool', 'S'), np.dtype('S5')) assert_equal(np.promote_types('b', 'S'), np.dtype('S4')) assert_equal(np.promote_types('u1', 'S'), np.dtype('S3')) assert_equal(np.promote_types('u2', 'S'), np.dtype('S5')) assert_equal(np.promote_types('u4', 'S'), np.dtype('S10')) assert_equal(np.promote_types('u8', 'S'), np.dtype('S20')) assert_equal(np.promote_types('i1', 'S'), np.dtype('S4')) 
assert_equal(np.promote_types('i2', 'S'), np.dtype('S6')) assert_equal(np.promote_types('i4', 'S'), np.dtype('S11')) assert_equal(np.promote_types('i8', 'S'), np.dtype('S21')) assert_equal(np.promote_types('bool', 'U'), np.dtype('U5')) assert_equal(np.promote_types('b', 'U'), np.dtype('U4')) assert_equal(np.promote_types('u1', 'U'), np.dtype('U3')) assert_equal(np.promote_types('u2', 'U'), np.dtype('U5')) assert_equal(np.promote_types('u4', 'U'), np.dtype('U10')) assert_equal(np.promote_types('u8', 'U'), np.dtype('U20')) assert_equal(np.promote_types('i1', 'U'), np.dtype('U4')) assert_equal(np.promote_types('i2', 'U'), np.dtype('U6')) assert_equal(np.promote_types('i4', 'U'), np.dtype('U11')) assert_equal(np.promote_types('i8', 'U'), np.dtype('U21')) assert_equal(np.promote_types('bool', 'S1'), np.dtype('S5')) assert_equal(np.promote_types('bool', 'S30'), np.dtype('S30')) assert_equal(np.promote_types('b', 'S1'), np.dtype('S4')) assert_equal(np.promote_types('b', 'S30'), np.dtype('S30')) assert_equal(np.promote_types('u1', 'S1'), np.dtype('S3')) assert_equal(np.promote_types('u1', 'S30'), np.dtype('S30')) assert_equal(np.promote_types('u2', 'S1'), np.dtype('S5')) assert_equal(np.promote_types('u2', 'S30'), np.dtype('S30')) assert_equal(np.promote_types('u4', 'S1'), np.dtype('S10')) assert_equal(np.promote_types('u4', 'S30'), np.dtype('S30')) assert_equal(np.promote_types('u8', 'S1'), np.dtype('S20')) assert_equal(np.promote_types('u8', 'S30'), np.dtype('S30')) def test_can_cast(self): assert_(np.can_cast(np.int32, np.int64)) assert_(np.can_cast(np.float64, np.complex)) assert_(not np.can_cast(np.complex, np.float)) assert_(np.can_cast('i8', 'f8')) assert_(not np.can_cast('i8', 'f4')) assert_(np.can_cast('i4', 'S11')) assert_(np.can_cast('i8', 'i8', 'no')) assert_(not np.can_cast('<i8', '>i8', 'no')) assert_(np.can_cast('<i8', '>i8', 'equiv')) assert_(not np.can_cast('<i4', '>i8', 'equiv')) assert_(np.can_cast('<i4', '>i8', 'safe')) assert_(not np.can_cast('<i8', '>i4', 'safe')) assert_(np.can_cast('<i8', '>i4', 'same_kind')) assert_(not np.can_cast('<i8', '>u4', 'same_kind')) assert_(np.can_cast('<i8', '>u4', 'unsafe')) assert_(np.can_cast('bool', 'S5')) assert_(not np.can_cast('bool', 'S4')) assert_(np.can_cast('b', 'S4')) assert_(not np.can_cast('b', 'S3')) assert_(np.can_cast('u1', 'S3')) assert_(not np.can_cast('u1', 'S2')) assert_(np.can_cast('u2', 'S5')) assert_(not np.can_cast('u2', 'S4')) assert_(np.can_cast('u4', 'S10')) assert_(not np.can_cast('u4', 'S9')) assert_(np.can_cast('u8', 'S20')) assert_(not np.can_cast('u8', 'S19')) assert_(np.can_cast('i1', 'S4')) assert_(not np.can_cast('i1', 'S3')) assert_(np.can_cast('i2', 'S6')) assert_(not np.can_cast('i2', 'S5')) assert_(np.can_cast('i4', 'S11')) assert_(not np.can_cast('i4', 'S10')) assert_(np.can_cast('i8', 'S21')) assert_(not np.can_cast('i8', 'S20')) assert_(np.can_cast('bool', 'S5')) assert_(not np.can_cast('bool', 'S4')) assert_(np.can_cast('b', 'U4')) assert_(not np.can_cast('b', 'U3')) assert_(np.can_cast('u1', 'U3')) assert_(not np.can_cast('u1', 'U2')) assert_(np.can_cast('u2', 'U5')) assert_(not np.can_cast('u2', 'U4')) assert_(np.can_cast('u4', 'U10')) assert_(not np.can_cast('u4', 'U9')) assert_(np.can_cast('u8', 'U20')) assert_(not np.can_cast('u8', 'U19')) assert_(np.can_cast('i1', 'U4')) assert_(not np.can_cast('i1', 'U3')) assert_(np.can_cast('i2', 'U6')) assert_(not np.can_cast('i2', 'U5')) assert_(np.can_cast('i4', 'U11')) assert_(not np.can_cast('i4', 'U10')) assert_(np.can_cast('i8', 'U21')) assert_(not 
np.can_cast('i8', 'U20')) assert_raises(TypeError, np.can_cast, 'i4', None) assert_raises(TypeError, np.can_cast, None, 'i4') # Custom exception class to test exception propagation in fromiter class NIterError(Exception): pass class TestFromiter(TestCase): def makegen(self): for x in range(24): yield x**2 def test_types(self): ai32 = np.fromiter(self.makegen(), np.int32) ai64 = np.fromiter(self.makegen(), np.int64) af = np.fromiter(self.makegen(), float) self.assertTrue(ai32.dtype == np.dtype(np.int32)) self.assertTrue(ai64.dtype == np.dtype(np.int64)) self.assertTrue(af.dtype == np.dtype(float)) def test_lengths(self): expected = np.array(list(self.makegen())) a = np.fromiter(self.makegen(), int) a20 = np.fromiter(self.makegen(), int, 20) self.assertTrue(len(a) == len(expected)) self.assertTrue(len(a20) == 20) self.assertRaises(ValueError, np.fromiter, self.makegen(), int, len(expected) + 10) def test_values(self): expected = np.array(list(self.makegen())) a = np.fromiter(self.makegen(), int) a20 = np.fromiter(self.makegen(), int, 20) self.assertTrue(np.alltrue(a == expected, axis=0)) self.assertTrue(np.alltrue(a20 == expected[:20], axis=0)) def load_data(self, n, eindex): # Utility method for the issue 2592 tests. # Raise an exception at the desired index in the iterator. for e in range(n): if e == eindex: raise NIterError('error at index %s' % eindex) yield e def test_2592(self): # Test iteration exceptions are correctly raised. count, eindex = 10, 5 self.assertRaises(NIterError, np.fromiter, self.load_data(count, eindex), dtype=int, count=count) def test_2592_edge(self): # Test iter. exceptions, edge case (exception at end of iterator). count = 10 eindex = count-1 self.assertRaises(NIterError, np.fromiter, self.load_data(count, eindex), dtype=int, count=count) class TestNonzero(TestCase): def test_nonzero_trivial(self): assert_equal(np.count_nonzero(np.array([])), 0) assert_equal(np.count_nonzero(np.array([], dtype='?')), 0) assert_equal(np.nonzero(np.array([])), ([],)) assert_equal(np.count_nonzero(np.array(0)), 0) assert_equal(np.count_nonzero(np.array(0, dtype='?')), 0) assert_equal(np.nonzero(np.array(0)), ([],)) assert_equal(np.count_nonzero(np.array(1)), 1) assert_equal(np.count_nonzero(np.array(1, dtype='?')), 1) assert_equal(np.nonzero(np.array(1)), ([0],)) def test_nonzero_onedim(self): x = np.array([1, 0, 2, -1, 0, 0, 8]) assert_equal(np.count_nonzero(x), 4) assert_equal(np.count_nonzero(x), 4) assert_equal(np.nonzero(x), ([0, 2, 3, 6],)) x = np.array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)], dtype=[('a', 'i4'), ('b', 'i2')]) assert_equal(np.count_nonzero(x['a']), 3) assert_equal(np.count_nonzero(x['b']), 4) assert_equal(np.nonzero(x['a']), ([0, 2, 3],)) assert_equal(np.nonzero(x['b']), ([0, 2, 3, 4],)) def test_nonzero_twodim(self): x = np.array([[0, 1, 0], [2, 0, 3]]) assert_equal(np.count_nonzero(x), 3) assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2])) x = np.eye(3) assert_equal(np.count_nonzero(x), 3) assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2])) x = np.array([[(0, 1), (0, 0), (1, 11)], [(1, 1), (1, 0), (0, 0)], [(0, 0), (1, 5), (0, 1)]], dtype=[('a', 'f4'), ('b', 'u1')]) assert_equal(np.count_nonzero(x['a']), 4) assert_equal(np.count_nonzero(x['b']), 5) assert_equal(np.nonzero(x['a']), ([0, 1, 1, 2], [2, 0, 1, 1])) assert_equal(np.nonzero(x['b']), ([0, 0, 1, 2, 2], [0, 2, 0, 1, 2])) assert_(not x['a'].T.flags.aligned) assert_equal(np.count_nonzero(x['a'].T), 4) assert_equal(np.count_nonzero(x['b'].T), 5) assert_equal(np.nonzero(x['a'].T), ([0, 1, 1, 2], [1, 1, 
2, 0])) assert_equal(np.nonzero(x['b'].T), ([0, 0, 1, 2, 2], [0, 1, 2, 0, 2])) def test_sparse(self): # test special sparse condition boolean code path for i in range(20): c = np.zeros(200, dtype=np.bool) c[i::20] = True assert_equal(np.nonzero(c)[0], np.arange(i, 200 + i, 20)) c = np.zeros(400, dtype=np.bool) c[10 + i:20 + i] = True c[20 + i*2] = True assert_equal(np.nonzero(c)[0], np.concatenate((np.arange(10 + i, 20 + i), [20 + i*2]))) def test_return_type(self): class C(np.ndarray): pass for view in (C, np.ndarray): for nd in range(1, 4): shape = tuple(range(2, 2+nd)) x = np.arange(np.prod(shape)).reshape(shape).view(view) for nzx in (np.nonzero(x), x.nonzero()): for nzx_i in nzx: assert_(type(nzx_i) is np.ndarray) assert_(nzx_i.flags.writeable) class TestIndex(TestCase): def test_boolean(self): a = rand(3, 5, 8) V = rand(5, 8) g1 = randint(0, 5, size=15) g2 = randint(0, 8, size=15) V[g1, g2] = -V[g1, g2] assert_((np.array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all()) def test_boolean_edgecase(self): a = np.array([], dtype='int32') b = np.array([], dtype='bool') c = a[b] assert_equal(c, []) assert_equal(c.dtype, np.dtype('int32')) class TestBinaryRepr(TestCase): def test_zero(self): assert_equal(np.binary_repr(0), '0') def test_large(self): assert_equal(np.binary_repr(10736848), '101000111101010011010000') def test_negative(self): assert_equal(np.binary_repr(-1), '-1') assert_equal(np.binary_repr(-1, width=8), '11111111') class TestBaseRepr(TestCase): def test_base3(self): assert_equal(np.base_repr(3**5, 3), '100000') def test_positive(self): assert_equal(np.base_repr(12, 10), '12') assert_equal(np.base_repr(12, 10, 4), '000012') assert_equal(np.base_repr(12, 4), '30') assert_equal(np.base_repr(3731624803700888, 36), '10QR0ROFCEW') def test_negative(self): assert_equal(np.base_repr(-12, 10), '-12') assert_equal(np.base_repr(-12, 10, 4), '-000012') assert_equal(np.base_repr(-12, 4), '-30') class TestArrayComparisons(TestCase): def test_array_equal(self): res = np.array_equal(np.array([1, 2]), np.array([1, 2])) assert_(res) assert_(type(res) is bool) res = np.array_equal(np.array([1, 2]), np.array([1, 2, 3])) assert_(not res) assert_(type(res) is bool) res = np.array_equal(np.array([1, 2]), np.array([3, 4])) assert_(not res) assert_(type(res) is bool) res = np.array_equal(np.array([1, 2]), np.array([1, 3])) assert_(not res) assert_(type(res) is bool) res = np.array_equal(np.array(['a'], dtype='S1'), np.array(['a'], dtype='S1')) assert_(res) assert_(type(res) is bool) res = np.array_equal(np.array([('a', 1)], dtype='S1,u4'), np.array([('a', 1)], dtype='S1,u4')) assert_(res) assert_(type(res) is bool) def test_array_equiv(self): res = np.array_equiv(np.array([1, 2]), np.array([1, 2])) assert_(res) assert_(type(res) is bool) res = np.array_equiv(np.array([1, 2]), np.array([1, 2, 3])) assert_(not res) assert_(type(res) is bool) res = np.array_equiv(np.array([1, 2]), np.array([3, 4])) assert_(not res) assert_(type(res) is bool) res = np.array_equiv(np.array([1, 2]), np.array([1, 3])) assert_(not res) assert_(type(res) is bool) res = np.array_equiv(np.array([1, 1]), np.array([1])) assert_(res) assert_(type(res) is bool) res = np.array_equiv(np.array([1, 1]), np.array([[1], [1]])) assert_(res) assert_(type(res) is bool) res = np.array_equiv(np.array([1, 2]), np.array([2])) assert_(not res) assert_(type(res) is bool) res = np.array_equiv(np.array([1, 2]), np.array([[1], [2]])) assert_(not res) assert_(type(res) is bool) res = np.array_equiv(np.array([1, 2]), np.array([[1, 2, 
3], [4, 5, 6], [7, 8, 9]])) assert_(not res) assert_(type(res) is bool) def assert_array_strict_equal(x, y): assert_array_equal(x, y) # Check flags, 32 bit arches typically don't provide 16 byte alignment if ((x.dtype.alignment <= 8 or np.intp().dtype.itemsize != 4) and sys.platform != 'win32'): assert_(x.flags == y.flags) else: assert_(x.flags.owndata == y.flags.owndata) assert_(x.flags.writeable == y.flags.writeable) assert_(x.flags.c_contiguous == y.flags.c_contiguous) assert_(x.flags.f_contiguous == y.flags.f_contiguous) assert_(x.flags.updateifcopy == y.flags.updateifcopy) # check endianness assert_(x.dtype.isnative == y.dtype.isnative) class TestClip(TestCase): def setUp(self): self.nr = 5 self.nc = 3 def fastclip(self, a, m, M, out=None): if out is None: return a.clip(m, M) else: return a.clip(m, M, out) def clip(self, a, m, M, out=None): # use slow-clip selector = np.less(a, m) + 2*np.greater(a, M) return selector.choose((a, m, M), out=out) # Handy functions def _generate_data(self, n, m): return randn(n, m) def _generate_data_complex(self, n, m): return randn(n, m) + 1.j * rand(n, m) def _generate_flt_data(self, n, m): return (randn(n, m)).astype(np.float32) def _neg_byteorder(self, a): a = np.asarray(a) if sys.byteorder == 'little': a = a.astype(a.dtype.newbyteorder('>')) else: a = a.astype(a.dtype.newbyteorder('<')) return a def _generate_non_native_data(self, n, m): data = randn(n, m) data = self._neg_byteorder(data) assert_(not data.dtype.isnative) return data def _generate_int_data(self, n, m): return (10 * rand(n, m)).astype(np.int64) def _generate_int32_data(self, n, m): return (10 * rand(n, m)).astype(np.int32) # Now the real test cases def test_simple_double(self): #Test native double input with scalar min/max. a = self._generate_data(self.nr, self.nc) m = 0.1 M = 0.6 ac = self.fastclip(a, m, M) act = self.clip(a, m, M) assert_array_strict_equal(ac, act) def test_simple_int(self): #Test native int input with scalar min/max. a = self._generate_int_data(self.nr, self.nc) a = a.astype(int) m = -2 M = 4 ac = self.fastclip(a, m, M) act = self.clip(a, m, M) assert_array_strict_equal(ac, act) def test_array_double(self): #Test native double input with array min/max. a = self._generate_data(self.nr, self.nc) m = np.zeros(a.shape) M = m + 0.5 ac = self.fastclip(a, m, M) act = self.clip(a, m, M) assert_array_strict_equal(ac, act) def test_simple_nonnative(self): #Test non native double input with scalar min/max. #Test native double input with non native double scalar min/max. a = self._generate_non_native_data(self.nr, self.nc) m = -0.5 M = 0.6 ac = self.fastclip(a, m, M) act = self.clip(a, m, M) assert_array_equal(ac, act) #Test native double input with non native double scalar min/max. a = self._generate_data(self.nr, self.nc) m = -0.5 M = self._neg_byteorder(0.6) assert_(not M.dtype.isnative) ac = self.fastclip(a, m, M) act = self.clip(a, m, M) assert_array_equal(ac, act) def test_simple_complex(self): #Test native complex input with native double scalar min/max. #Test native input with complex double scalar min/max. a = 3 * self._generate_data_complex(self.nr, self.nc) m = -0.5 M = 1. ac = self.fastclip(a, m, M) act = self.clip(a, m, M) assert_array_strict_equal(ac, act) #Test native input with complex double scalar min/max. a = 3 * self._generate_data(self.nr, self.nc) m = -0.5 + 1.j M = 1. 
+ 2.j
        ac = self.fastclip(a, m, M)
        act = self.clip(a, m, M)
        assert_array_strict_equal(ac, act)

    def test_clip_complex(self):
        # Address Issue gh-5354 for clipping complex arrays
        # Test native complex input without explicit min/max
        # ie, either min=None or max=None
        a = np.ones(10, dtype=np.complex)
        m = a.min()
        M = a.max()
        am = self.fastclip(a, m, None)
        aM = self.fastclip(a, None, M)
        assert_array_strict_equal(am, a)
        assert_array_strict_equal(aM, a)

    def test_clip_non_contig(self):
        #Test clip for non contiguous native input and native scalar min/max.
        a = self._generate_data(self.nr * 2, self.nc * 3)
        a = a[::2, ::3]
        assert_(not a.flags['F_CONTIGUOUS'])
        assert_(not a.flags['C_CONTIGUOUS'])
        ac = self.fastclip(a, -1.6, 1.7)
        act = self.clip(a, -1.6, 1.7)
        assert_array_strict_equal(ac, act)

    def test_simple_out(self):
        #Test native double input with scalar min/max.
        a = self._generate_data(self.nr, self.nc)
        m = -0.5
        M = 0.6
        ac = np.zeros(a.shape)
        act = np.zeros(a.shape)
        self.fastclip(a, m, M, ac)
        self.clip(a, m, M, act)
        assert_array_strict_equal(ac, act)

    def test_simple_int32_inout(self):
        #Test native int32 input with double min/max and int32 out.
        a = self._generate_int32_data(self.nr, self.nc)
        m = np.float64(0)
        M = np.float64(2)
        ac = np.zeros(a.shape, dtype=np.int32)
        act = ac.copy()
        self.fastclip(a, m, M, ac)
        self.clip(a, m, M, act)
        assert_array_strict_equal(ac, act)

    def test_simple_int64_out(self):
        #Test native int32 input with int32 scalar min/max and int64 out.
        a = self._generate_int32_data(self.nr, self.nc)
        m = np.int32(-1)
        M = np.int32(1)
        ac = np.zeros(a.shape, dtype=np.int64)
        act = ac.copy()
        self.fastclip(a, m, M, ac)
        self.clip(a, m, M, act)
        assert_array_strict_equal(ac, act)

    def test_simple_int64_inout(self):
        #Test native int32 input with double array min/max and int32 out.
        a = self._generate_int32_data(self.nr, self.nc)
        m = np.zeros(a.shape, np.float64)
        M = np.float64(1)
        ac = np.zeros(a.shape, dtype=np.int32)
        act = ac.copy()
        self.fastclip(a, m, M, ac)
        self.clip(a, m, M, act)
        assert_array_strict_equal(ac, act)

    def test_simple_int32_out(self):
        #Test native double input with scalar min/max and int out.
        a = self._generate_data(self.nr, self.nc)
        m = -1.0
        M = 2.0
        ac = np.zeros(a.shape, dtype=np.int32)
        act = ac.copy()
        self.fastclip(a, m, M, ac)
        self.clip(a, m, M, act)
        assert_array_strict_equal(ac, act)

    def test_simple_inplace_01(self):
        #Test native double input with array min/max in-place.
        a = self._generate_data(self.nr, self.nc)
        ac = a.copy()
        m = np.zeros(a.shape)
        M = 1.0
        self.fastclip(a, m, M, a)
        self.clip(a, m, M, ac)
        assert_array_strict_equal(a, ac)

    def test_simple_inplace_02(self):
        #Test native double input with scalar min/max in-place.
        a = self._generate_data(self.nr, self.nc)
        ac = a.copy()
        m = -0.5
        M = 0.6
        self.fastclip(a, m, M, a)
        self.clip(a, m, M, ac)
        assert_array_strict_equal(a, ac)

    def test_noncontig_inplace(self):
        #Test non contiguous double input with double scalar min/max in-place.
        a = self._generate_data(self.nr * 2, self.nc * 3)
        a = a[::2, ::3]
        assert_(not a.flags['F_CONTIGUOUS'])
        assert_(not a.flags['C_CONTIGUOUS'])
        ac = a.copy()
        m = -0.5
        M = 0.6
        self.fastclip(a, m, M, a)
        self.clip(a, m, M, ac)
        assert_array_equal(a, ac)

    def test_type_cast_01(self):
        #Test native double input with scalar min/max.
        a = self._generate_data(self.nr, self.nc)
        m = -0.5
        M = 0.6
        ac = self.fastclip(a, m, M)
        act = self.clip(a, m, M)
        assert_array_strict_equal(ac, act)

    def test_type_cast_02(self):
        #Test native int32 input with int32 scalar min/max.
        a = self._generate_int_data(self.nr, self.nc)
        a = a.astype(np.int32)
        m = -2
        M = 4
        ac = self.fastclip(a, m, M)
        act = self.clip(a, m, M)
        assert_array_strict_equal(ac, act)

    def test_type_cast_03(self):
        #Test native int32 input with float64 scalar min/max.
        a = self._generate_int32_data(self.nr, self.nc)
        m = -2
        M = 4
        ac = self.fastclip(a, np.float64(m), np.float64(M))
        act = self.clip(a, np.float64(m), np.float64(M))
        assert_array_strict_equal(ac, act)

    def test_type_cast_04(self):
        #Test native int32 input with float32 scalar min/max.
        a = self._generate_int32_data(self.nr, self.nc)
        m = np.float32(-2)
        M = np.float32(4)
        act = self.fastclip(a, m, M)
        ac = self.clip(a, m, M)
        assert_array_strict_equal(ac, act)

    def test_type_cast_05(self):
        #Test native int32 with double arrays min/max.
        a = self._generate_int_data(self.nr, self.nc)
        m = -0.5
        M = 1.
        ac = self.fastclip(a, m * np.zeros(a.shape), M)
        act = self.clip(a, m * np.zeros(a.shape), M)
        assert_array_strict_equal(ac, act)

    def test_type_cast_06(self):
        #Test native with NON native scalar min/max.
        a = self._generate_data(self.nr, self.nc)
        m = 0.5
        m_s = self._neg_byteorder(m)
        M = 1.
        act = self.clip(a, m_s, M)
        ac = self.fastclip(a, m_s, M)
        assert_array_strict_equal(ac, act)

    def test_type_cast_07(self):
        #Test NON native with native array min/max.
        a = self._generate_data(self.nr, self.nc)
        m = -0.5 * np.ones(a.shape)
        M = 1.
        a_s = self._neg_byteorder(a)
        assert_(not a_s.dtype.isnative)
        act = a_s.clip(m, M)
        ac = self.fastclip(a_s, m, M)
        assert_array_strict_equal(ac, act)

    def test_type_cast_08(self):
        #Test NON native with native scalar min/max.
        a = self._generate_data(self.nr, self.nc)
        m = -0.5
        M = 1.
        a_s = self._neg_byteorder(a)
        assert_(not a_s.dtype.isnative)
        ac = self.fastclip(a_s, m, M)
        act = a_s.clip(m, M)
        assert_array_strict_equal(ac, act)

    def test_type_cast_09(self):
        #Test native with NON native array min/max.
        a = self._generate_data(self.nr, self.nc)
        m = -0.5 * np.ones(a.shape)
        M = 1.
        m_s = self._neg_byteorder(m)
        assert_(not m_s.dtype.isnative)
        ac = self.fastclip(a, m_s, M)
        act = self.clip(a, m_s, M)
        assert_array_strict_equal(ac, act)

    def test_type_cast_10(self):
        #Test native int32 with float min/max and float out for output argument.
        a = self._generate_int_data(self.nr, self.nc)
        b = np.zeros(a.shape, dtype=np.float32)
        m = np.float32(-0.5)
        M = np.float32(1)
        act = self.clip(a, m, M, out=b)
        ac = self.fastclip(a, m, M, out=b)
        assert_array_strict_equal(ac, act)

    def test_type_cast_11(self):
        #Test non native with native scalar, min/max, out non native
        a = self._generate_non_native_data(self.nr, self.nc)
        b = a.copy()
        b = b.astype(b.dtype.newbyteorder('>'))
        bt = b.copy()
        m = -0.5
        M = 1.
        self.fastclip(a, m, M, out=b)
        self.clip(a, m, M, out=bt)
        assert_array_strict_equal(b, bt)

    def test_type_cast_12(self):
        #Test native int32 input and min/max and float out
        a = self._generate_int_data(self.nr, self.nc)
        b = np.zeros(a.shape, dtype=np.float32)
        m = np.int32(0)
        M = np.int32(1)
        act = self.clip(a, m, M, out=b)
        ac = self.fastclip(a, m, M, out=b)
        assert_array_strict_equal(ac, act)

    def test_clip_with_out_simple(self):
        #Test native double input with scalar min/max
        a = self._generate_data(self.nr, self.nc)
        m = -0.5
        M = 0.6
        ac = np.zeros(a.shape)
        act = np.zeros(a.shape)
        self.fastclip(a, m, M, ac)
        self.clip(a, m, M, act)
        assert_array_strict_equal(ac, act)

    def test_clip_with_out_simple2(self):
        #Test native int32 input with double min/max and int32 out
        a = self._generate_int32_data(self.nr, self.nc)
        m = np.float64(0)
        M = np.float64(2)
        ac = np.zeros(a.shape, dtype=np.int32)
        act = ac.copy()
        self.fastclip(a, m, M, ac)
        self.clip(a, m, M, act)
        assert_array_strict_equal(ac, act)

    def test_clip_with_out_simple_int32(self):
        #Test native int32 input with int32 scalar min/max and int64 out
        a = self._generate_int32_data(self.nr, self.nc)
        m = np.int32(-1)
        M = np.int32(1)
        ac = np.zeros(a.shape, dtype=np.int64)
        act = ac.copy()
        self.fastclip(a, m, M, ac)
        self.clip(a, m, M, act)
        assert_array_strict_equal(ac, act)

    def test_clip_with_out_array_int32(self):
        #Test native int32 input with double array min/max and int32 out
        a = self._generate_int32_data(self.nr, self.nc)
        m = np.zeros(a.shape, np.float64)
        M = np.float64(1)
        ac = np.zeros(a.shape, dtype=np.int32)
        act = ac.copy()
        self.fastclip(a, m, M, ac)
        self.clip(a, m, M, act)
        assert_array_strict_equal(ac, act)

    def test_clip_with_out_array_outint32(self):
        #Test native double input with scalar min/max and int out
        a = self._generate_data(self.nr, self.nc)
        m = -1.0
        M = 2.0
        ac = np.zeros(a.shape, dtype=np.int32)
        act = ac.copy()
        self.fastclip(a, m, M, ac)
        self.clip(a, m, M, act)
        assert_array_strict_equal(ac, act)

    def test_clip_inplace_array(self):
        #Test native double input with array min/max
        a = self._generate_data(self.nr, self.nc)
        ac = a.copy()
        m = np.zeros(a.shape)
        M = 1.0
        self.fastclip(a, m, M, a)
        self.clip(a, m, M, ac)
        assert_array_strict_equal(a, ac)

    def test_clip_inplace_simple(self):
        #Test native double input with scalar min/max
        a = self._generate_data(self.nr, self.nc)
        ac = a.copy()
        m = -0.5
        M = 0.6
        self.fastclip(a, m, M, a)
        self.clip(a, m, M, ac)
        assert_array_strict_equal(a, ac)

    def test_clip_func_takes_out(self):
        # Ensure that the clip() function takes an out= argument.
        a = self._generate_data(self.nr, self.nc)
        ac = a.copy()
        m = -0.5
        M = 0.6
        a2 = np.clip(a, m, M, out=a)
        self.clip(a, m, M, ac)
        assert_array_strict_equal(a2, ac)
        self.assertTrue(a2 is a)

    def test_clip_nan(self):
        d = np.arange(7.)
        assert_equal(d.clip(min=np.nan), d)
        assert_equal(d.clip(max=np.nan), d)
        assert_equal(d.clip(min=np.nan, max=np.nan), d)
        assert_equal(d.clip(min=-2, max=np.nan), d)
        assert_equal(d.clip(min=np.nan, max=10), d)


class TestAllclose(object):
    rtol = 1e-5
    atol = 1e-8

    def setUp(self):
        self.olderr = np.seterr(invalid='ignore')

    def tearDown(self):
        np.seterr(**self.olderr)

    def tst_allclose(self, x, y):
        assert_(np.allclose(x, y), "%s and %s not close" % (x, y))

    def tst_not_allclose(self, x, y):
        assert_(not np.allclose(x, y), "%s and %s shouldn't be close" % (x, y))

    def test_ip_allclose(self):
        #Parametric test factory.
        arr = np.array([100, 1000])
        aran = np.arange(125).reshape((5, 5, 5))

        atol = self.atol
        rtol = self.rtol

        data = [([1, 0], [1, 0]),
                ([atol], [0]),
                ([1], [1+rtol+atol]),
                (arr, arr + arr*rtol),
                (arr, arr + arr*rtol + atol*2),
                (aran, aran + aran*rtol),
                (np.inf, np.inf),
                (np.inf, [np.inf])]

        for (x, y) in data:
            yield (self.tst_allclose, x, y)

    def test_ip_not_allclose(self):
        #Parametric test factory.
        aran = np.arange(125).reshape((5, 5, 5))

        atol = self.atol
        rtol = self.rtol

        data = [([np.inf, 0], [1, np.inf]),
                ([np.inf, 0], [1, 0]),
                ([np.inf, np.inf], [1, np.inf]),
                ([np.inf, np.inf], [1, 0]),
                ([-np.inf, 0], [np.inf, 0]),
                ([np.nan, 0], [np.nan, 0]),
                ([atol*2], [0]),
                ([1], [1+rtol+atol*2]),
                (aran, aran + aran*atol + atol*2),
                (np.array([np.inf, 1]), np.array([0, np.inf]))]

        for (x, y) in data:
            yield (self.tst_not_allclose, x, y)

    def test_no_parameter_modification(self):
        x = np.array([np.inf, 1])
        y = np.array([0, np.inf])
        np.allclose(x, y)
        assert_array_equal(x, np.array([np.inf, 1]))
        assert_array_equal(y, np.array([0, np.inf]))

    def test_min_int(self):
        # Could make problems because of abs(min_int) == min_int
        min_int = np.iinfo(np.int_).min
        a = np.array([min_int], dtype=np.int_)
        assert_(np.allclose(a, a))

    def test_equalnan(self):
        x = np.array([1.0, np.nan])
        assert_(np.allclose(x, x, equal_nan=True))

    def test_return_class_is_ndarray(self):
        # Issue gh-6475
        # Check that allclose does not preserve subtypes
        class Foo(np.ndarray):
            def __new__(cls, *args, **kwargs):
                return np.array(*args, **kwargs).view(cls)

        a = Foo([1])
        assert_(type(np.allclose(a, a)) is bool)


class TestIsclose(object):
    rtol = 1e-5
    atol = 1e-8

    def setup(self):
        atol = self.atol
        rtol = self.rtol
        arr = np.array([100, 1000])
        aran = np.arange(125).reshape((5, 5, 5))

        self.all_close_tests = [
                ([1, 0], [1, 0]),
                ([atol], [0]),
                ([1], [1 + rtol + atol]),
                (arr, arr + arr*rtol),
                (arr, arr + arr*rtol + atol),
                (aran, aran + aran*rtol),
                (np.inf, np.inf),
                (np.inf, [np.inf]),
                ([np.inf, -np.inf], [np.inf, -np.inf]),
                ]
        self.none_close_tests = [
                ([np.inf, 0], [1, np.inf]),
                ([np.inf, -np.inf], [1, 0]),
                ([np.inf, np.inf], [1, -np.inf]),
                ([np.inf, np.inf], [1, 0]),
                ([np.nan, 0], [np.nan, -np.inf]),
                ([atol*2], [0]),
                ([1], [1 + rtol + atol*2]),
                (aran, aran + rtol*1.1*aran + atol*1.1),
                (np.array([np.inf, 1]), np.array([0, np.inf])),
                ]
        self.some_close_tests = [
                ([np.inf, 0], [np.inf, atol*2]),
                ([atol, 1, 1e6*(1 + 2*rtol) + atol], [0, np.nan, 1e6]),
                (np.arange(3), [0, 1, 2.1]),
                (np.nan, [np.nan, np.nan, np.nan]),
                ([0], [atol, np.inf, -np.inf, np.nan]),
                (0, [atol, np.inf, -np.inf, np.nan]),
                ]
        self.some_close_results = [
                [True, False],
                [True, False, False],
                [True, True, False],
                [False, False, False],
                [True, False, False, False],
                [True, False, False, False],
                ]

    def test_ip_isclose(self):
        self.setup()
        tests = self.some_close_tests
        results = self.some_close_results
        for (x, y), result in zip(tests, results):
            yield (assert_array_equal, np.isclose(x, y), result)

    def tst_all_isclose(self, x, y):
        assert_(np.all(np.isclose(x, y)), "%s and %s not close" % (x, y))

    def tst_none_isclose(self, x, y):
        msg = "%s and %s shouldn't be close"
        assert_(not np.any(np.isclose(x, y)), msg % (x, y))

    def tst_isclose_allclose(self, x, y):
        msg = "isclose.all() and allclose aren't same for %s and %s"
        assert_array_equal(np.isclose(x, y).all(), np.allclose(x, y),
                           msg % (x, y))

    def test_ip_all_isclose(self):
        self.setup()
        for (x, y) in self.all_close_tests:
            yield (self.tst_all_isclose, x, y)

    def test_ip_none_isclose(self):
        self.setup()
        for (x, y) in self.none_close_tests:
            yield (self.tst_none_isclose, x, y)

    def test_ip_isclose_allclose(self):
        self.setup()
        tests = (self.all_close_tests + self.none_close_tests +
                 self.some_close_tests)
        for (x, y) in tests:
            yield (self.tst_isclose_allclose, x, y)

    def test_equal_nan(self):
        assert_array_equal(np.isclose(np.nan, np.nan, equal_nan=True), [True])
        arr = np.array([1.0, np.nan])
        assert_array_equal(np.isclose(arr, arr, equal_nan=True), [True, True])

    def test_masked_arrays(self):
        # Make sure to test the output type when arguments are interchanged.

        x = np.ma.masked_where([True, True, False], np.arange(3))
        assert_(type(x) is type(np.isclose(2, x)))
        assert_(type(x) is type(np.isclose(x, 2)))

        x = np.ma.masked_where([True, True, False], [np.nan, np.inf, np.nan])
        assert_(type(x) is type(np.isclose(np.inf, x)))
        assert_(type(x) is type(np.isclose(x, np.inf)))

        x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan])
        y = np.isclose(np.nan, x, equal_nan=True)
        assert_(type(x) is type(y))
        # Ensure that the mask isn't modified...
        assert_array_equal([True, True, False], y.mask)
        y = np.isclose(x, np.nan, equal_nan=True)
        assert_(type(x) is type(y))
        # Ensure that the mask isn't modified...
        assert_array_equal([True, True, False], y.mask)

        x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan])
        y = np.isclose(x, x, equal_nan=True)
        assert_(type(x) is type(y))
        # Ensure that the mask isn't modified...
        assert_array_equal([True, True, False], y.mask)

    def test_scalar_return(self):
        assert_(np.isscalar(np.isclose(1, 1)))

    def test_no_parameter_modification(self):
        x = np.array([np.inf, 1])
        y = np.array([0, np.inf])
        np.isclose(x, y)
        assert_array_equal(x, np.array([np.inf, 1]))
        assert_array_equal(y, np.array([0, np.inf]))


class TestStdVar(TestCase):
    def setUp(self):
        self.A = np.array([1, -1, 1, -1])
        self.real_var = 1

    def test_basic(self):
        assert_almost_equal(np.var(self.A), self.real_var)
        assert_almost_equal(np.std(self.A)**2, self.real_var)

    def test_scalars(self):
        assert_equal(np.var(1), 0)
        assert_equal(np.std(1), 0)

    def test_ddof1(self):
        assert_almost_equal(np.var(self.A, ddof=1),
                            self.real_var*len(self.A)/float(len(self.A)-1))
        assert_almost_equal(np.std(self.A, ddof=1)**2,
                            self.real_var*len(self.A)/float(len(self.A)-1))

    def test_ddof2(self):
        assert_almost_equal(np.var(self.A, ddof=2),
                            self.real_var*len(self.A)/float(len(self.A)-2))
        assert_almost_equal(np.std(self.A, ddof=2)**2,
                            self.real_var*len(self.A)/float(len(self.A)-2))

    def test_out_scalar(self):
        d = np.arange(10)
        out = np.array(0.)
        r = np.std(d, out=out)
        assert_(r is out)
        assert_array_equal(r, out)
        r = np.var(d, out=out)
        assert_(r is out)
        assert_array_equal(r, out)
        r = np.mean(d, out=out)
        assert_(r is out)
        assert_array_equal(r, out)


class TestStdVarComplex(TestCase):
    def test_basic(self):
        A = np.array([1, 1.j, -1, -1.j])
        real_var = 1
        assert_almost_equal(np.var(A), real_var)
        assert_almost_equal(np.std(A)**2, real_var)

    def test_scalars(self):
        assert_equal(np.var(1j), 0)
        assert_equal(np.std(1j), 0)


class TestCreationFuncs(TestCase):
    #Test ones, zeros, empty and filled

    def setUp(self):
        self.dtypes = ('b', 'i', 'u', 'f', 'c', 'S', 'a', 'U', 'V')
        self.orders = {'C': 'c_contiguous', 'F': 'f_contiguous'}
        self.ndims = 10

    def check_function(self, func, fill_value=None):
        par = (
            (0, 1, 2),
            range(self.ndims),
            self.orders,
            self.dtypes,
            2**np.arange(9)
        )
        fill_kwarg = {}
        if fill_value is not None:
            fill_kwarg = {'fill_value': fill_value}
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', DeprecationWarning)
            for size, ndims, order, type, bytes in itertools.product(*par):
                shape = ndims * [size]
                try:
                    dtype = np.dtype('{0}{1}'.format(type, bytes))
                except TypeError:
                    # dtype combination does not exist
                    continue
                else:
                    # do not fill void type
                    if fill_value is not None and type in 'V':
                        continue

                    arr = func(shape, order=order, dtype=dtype, **fill_kwarg)

                    assert_(arr.dtype == dtype)
                    assert_(getattr(arr.flags, self.orders[order]))

                    if fill_value is not None:
                        if dtype.str.startswith('|S'):
                            val = str(fill_value)
                        else:
                            val = fill_value
                        assert_equal(arr, dtype.type(val))

    def test_zeros(self):
        self.check_function(np.zeros)

    def test_ones(self):
        self.check_function(np.ones)

    def test_empty(self):
        self.check_function(np.empty)

    def test_filled(self):
        self.check_function(np.full, 0)
        self.check_function(np.full, 1)

    def test_for_reference_leak(self):
        # Make sure we have an object for reference
        dim = 1
        beg = sys.getrefcount(dim)
        np.zeros([dim]*10)
        assert_(sys.getrefcount(dim) == beg)
        np.ones([dim]*10)
        assert_(sys.getrefcount(dim) == beg)
        np.empty([dim]*10)
        assert_(sys.getrefcount(dim) == beg)
        np.full([dim]*10, 0)
        assert_(sys.getrefcount(dim) == beg)


class TestLikeFuncs(TestCase):
    '''Test ones_like, zeros_like, empty_like and full_like'''

    def setUp(self):
        self.data = [
                # Array scalars
                (np.array(3.), None),
                (np.array(3), 'f8'),
                # 1D arrays
                (np.arange(6, dtype='f4'), None),
                (np.arange(6), 'c16'),
                # 2D C-layout arrays
                (np.arange(6).reshape(2, 3), None),
                (np.arange(6).reshape(3, 2), 'i1'),
                # 2D F-layout arrays
                (np.arange(6).reshape((2, 3), order='F'), None),
                (np.arange(6).reshape((3, 2), order='F'), 'i1'),
                # 3D C-layout arrays
                (np.arange(24).reshape(2, 3, 4), None),
                (np.arange(24).reshape(4, 3, 2), 'f4'),
                # 3D F-layout arrays
                (np.arange(24).reshape((2, 3, 4), order='F'), None),
                (np.arange(24).reshape((4, 3, 2), order='F'), 'f4'),
                # 3D non-C/F-layout arrays
                (np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None),
                (np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'),
                ]

    def compare_array_value(self, dz, value, fill_value):
        if value is not None:
            if fill_value:
                try:
                    z = dz.dtype.type(value)
                except OverflowError:
                    pass
                else:
                    assert_(np.all(dz == z))
            else:
                assert_(np.all(dz == value))

    def check_like_function(self, like_function, value, fill_value=False):
        if fill_value:
            fill_kwarg = {'fill_value': value}
        else:
            fill_kwarg = {}
        for d, dtype in self.data:
            # default (K) order, dtype
            dz = like_function(d, dtype=dtype, **fill_kwarg)
            assert_equal(dz.shape, d.shape)
            assert_equal(np.array(dz.strides)*d.dtype.itemsize,
                         np.array(d.strides)*dz.dtype.itemsize)
            assert_equal(d.flags.c_contiguous, dz.flags.c_contiguous)
            assert_equal(d.flags.f_contiguous, dz.flags.f_contiguous)
            if dtype is None:
                assert_equal(dz.dtype, d.dtype)
            else:
                assert_equal(dz.dtype, np.dtype(dtype))
            self.compare_array_value(dz, value, fill_value)

            # C order, default dtype
            dz = like_function(d, order='C', dtype=dtype, **fill_kwarg)
            assert_equal(dz.shape, d.shape)
            assert_(dz.flags.c_contiguous)
            if dtype is None:
                assert_equal(dz.dtype, d.dtype)
            else:
                assert_equal(dz.dtype, np.dtype(dtype))
            self.compare_array_value(dz, value, fill_value)

            # F order, default dtype
            dz = like_function(d, order='F', dtype=dtype, **fill_kwarg)
            assert_equal(dz.shape, d.shape)
            assert_(dz.flags.f_contiguous)
            if dtype is None:
                assert_equal(dz.dtype, d.dtype)
            else:
                assert_equal(dz.dtype, np.dtype(dtype))
            self.compare_array_value(dz, value, fill_value)

            # A order
            dz = like_function(d, order='A', dtype=dtype, **fill_kwarg)
            assert_equal(dz.shape, d.shape)
            if d.flags.f_contiguous:
                assert_(dz.flags.f_contiguous)
            else:
                assert_(dz.flags.c_contiguous)
            if dtype is None:
                assert_equal(dz.dtype, d.dtype)
            else:
                assert_equal(dz.dtype, np.dtype(dtype))
            self.compare_array_value(dz, value, fill_value)

        # Test the 'subok' parameter
        a = np.matrix([[1, 2], [3, 4]])

        b = like_function(a, **fill_kwarg)
        assert_(type(b) is np.matrix)

        b = like_function(a, subok=False, **fill_kwarg)
        assert_(type(b) is not np.matrix)

    def test_ones_like(self):
        self.check_like_function(np.ones_like, 1)

    def test_zeros_like(self):
        self.check_like_function(np.zeros_like, 0)

    def test_empty_like(self):
        self.check_like_function(np.empty_like, None)

    def test_filled_like(self):
        self.check_like_function(np.full_like, 0, True)
        self.check_like_function(np.full_like, 1, True)
        self.check_like_function(np.full_like, 1000, True)
        self.check_like_function(np.full_like, 123.456, True)
        self.check_like_function(np.full_like, np.inf, True)


class TestCorrelate(TestCase):
    def _setup(self, dt):
        self.x = np.array([1, 2, 3, 4, 5], dtype=dt)
        self.xs = np.arange(1, 20)[::3]
        self.y = np.array([-1, -2, -3], dtype=dt)
        self.z1 = np.array([-3., -8., -14., -20., -26., -14., -5.], dtype=dt)
        self.z1_4 = np.array([-2., -5., -8., -11., -14., -5.], dtype=dt)
        self.z1r = np.array([-15., -22., -22., -16., -10., -4., -1.], dtype=dt)
        self.z2 = np.array([-5., -14., -26., -20., -14., -8., -3.], dtype=dt)
        self.z2r = np.array([-1., -4., -10., -16., -22., -22., -15.], dtype=dt)
        self.zs = np.array([-3., -14., -30., -48., -66., -84.,
                            -102., -54., -19.], dtype=dt)

    def test_float(self):
        self._setup(np.float)
        z = np.correlate(self.x, self.y, 'full')
        assert_array_almost_equal(z, self.z1)
        z = np.correlate(self.x, self.y[:-1], 'full')
        assert_array_almost_equal(z, self.z1_4)
        z = np.correlate(self.y, self.x, 'full')
        assert_array_almost_equal(z, self.z2)
        z = np.correlate(self.x[::-1], self.y, 'full')
        assert_array_almost_equal(z, self.z1r)
        z = np.correlate(self.y, self.x[::-1], 'full')
        assert_array_almost_equal(z, self.z2r)
        z = np.correlate(self.xs, self.y, 'full')
        assert_array_almost_equal(z, self.zs)

    def test_object(self):
        self._setup(Decimal)
        z = np.correlate(self.x, self.y, 'full')
        assert_array_almost_equal(z, self.z1)
        z = np.correlate(self.y, self.x, 'full')
        assert_array_almost_equal(z, self.z2)

    def test_no_overwrite(self):
        d = np.ones(100)
        k = np.ones(3)
        np.correlate(d, k)
        assert_array_equal(d, np.ones(100))
        assert_array_equal(k, np.ones(3))

    def test_complex(self):
        x = np.array([1, 2, 3, 4+1j], dtype=np.complex)
        y = np.array([-1, -2j, 3+1j], dtype=np.complex)
        r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j,
                        -4-1j], dtype=np.complex)
        r_z = r_z[::-1].conjugate()
        z = np.correlate(y, x, mode='full')
        assert_array_almost_equal(z, r_z)


class TestConvolve(TestCase):
    def test_object(self):
        d = [1.] * 100
        k = [1.] * 3
        assert_array_almost_equal(np.convolve(d, k)[2:-2], np.full(98, 3))

    def test_no_overwrite(self):
        d = np.ones(100)
        k = np.ones(3)
        np.convolve(d, k)
        assert_array_equal(d, np.ones(100))
        assert_array_equal(k, np.ones(3))


class TestArgwhere(object):
    def test_2D(self):
        x = np.arange(6).reshape((2, 3))
        assert_array_equal(np.argwhere(x > 1),
                           [[0, 2],
                            [1, 0],
                            [1, 1],
                            [1, 2]])

    def test_list(self):
        assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]])


class TestStringFunction(object):
    def test_set_string_function(self):
        a = np.array([1])
        np.set_string_function(lambda x: "FOO", repr=True)
        assert_equal(repr(a), "FOO")
        np.set_string_function(None, repr=True)
        assert_equal(repr(a), "array([1])")

        np.set_string_function(lambda x: "FOO", repr=False)
        assert_equal(str(a), "FOO")
        np.set_string_function(None, repr=False)
        assert_equal(str(a), "[1]")


class TestRoll(TestCase):
    def test_roll1d(self):
        x = np.arange(10)
        xr = np.roll(x, 2)
        assert_equal(xr, np.array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]))

    def test_roll2d(self):
        x2 = np.reshape(np.arange(10), (2, 5))
        x2r = np.roll(x2, 1)
        assert_equal(x2r, np.array([[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]]))

        x2r = np.roll(x2, 1, axis=0)
        assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))

        x2r = np.roll(x2, 1, axis=1)
        assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))

    def test_roll_empty(self):
        x = np.array([])
        assert_equal(np.roll(x, 1), np.array([]))


class TestRollaxis(TestCase):
    # expected shape indexed by (axis, start) for array of
    # shape (1, 2, 3, 4)
    tgtshape = {(0, 0): (1, 2, 3, 4), (0, 1): (1, 2, 3, 4),
                (0, 2): (2, 1, 3, 4), (0, 3): (2, 3, 1, 4),
                (0, 4): (2, 3, 4, 1),
                (1, 0): (2, 1, 3, 4), (1, 1): (1, 2, 3, 4),
                (1, 2): (1, 2, 3, 4), (1, 3): (1, 3, 2, 4),
                (1, 4): (1, 3, 4, 2),
                (2, 0): (3, 1, 2, 4), (2, 1): (1, 3, 2, 4),
                (2, 2): (1, 2, 3, 4), (2, 3): (1, 2, 3, 4),
                (2, 4): (1, 2, 4, 3),
                (3, 0): (4, 1, 2, 3), (3, 1): (1, 4, 2, 3),
                (3, 2): (1, 2, 4, 3), (3, 3): (1, 2, 3, 4),
                (3, 4): (1, 2, 3, 4)}

    def test_exceptions(self):
        a = np.arange(1*2*3*4).reshape(1, 2, 3, 4)
        assert_raises(ValueError, np.rollaxis, a, -5, 0)
        assert_raises(ValueError, np.rollaxis, a, 0, -5)
        assert_raises(ValueError, np.rollaxis, a, 4, 0)
        assert_raises(ValueError, np.rollaxis, a, 0, 5)

    def test_results(self):
        a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
        aind = np.indices(a.shape)
        assert_(a.flags['OWNDATA'])
        for (i, j) in self.tgtshape:
            # positive axis, positive start
            res = np.rollaxis(a, axis=i, start=j)
            i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
            assert_(np.all(res[i0, i1, i2, i3] == a))
            assert_(res.shape == self.tgtshape[(i, j)], str((i, j)))
            assert_(not res.flags['OWNDATA'])

            # negative axis, positive start
            ip = i + 1
            res = np.rollaxis(a, axis=-ip, start=j)
            i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
            assert_(np.all(res[i0, i1, i2, i3] == a))
            assert_(res.shape == self.tgtshape[(4 - ip, j)])
            assert_(not res.flags['OWNDATA'])

            # positive axis, negative start
            jp = j + 1 if j < 4 else j
            res = np.rollaxis(a, axis=i, start=-jp)
            i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
            assert_(np.all(res[i0, i1, i2, i3] == a))
            assert_(res.shape == self.tgtshape[(i, 4 - jp)])
            assert_(not res.flags['OWNDATA'])

            # negative axis, negative start
            ip = i + 1
            jp = j + 1 if j < 4 else j
            res = np.rollaxis(a, axis=-ip, start=-jp)
            i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
            assert_(np.all(res[i0, i1, i2, i3] == a))
            assert_(res.shape == self.tgtshape[(4 - ip, 4 - jp)])
            assert_(not res.flags['OWNDATA'])


class TestCross(TestCase):
    def test_2x2(self):
        u = [1, 2]
        v = [3, 4]
        z = -2
        cp = np.cross(u, v)
        assert_equal(cp, z)
        cp = np.cross(v, u)
        assert_equal(cp, -z)

    def test_2x3(self):
        u = [1, 2]
        v = [3, 4, 5]
        z = np.array([10, -5, -2])
        cp = np.cross(u, v)
        assert_equal(cp, z)
        cp = np.cross(v, u)
        assert_equal(cp, -z)

    def test_3x3(self):
        u = [1, 2, 3]
        v = [4, 5, 6]
        z = np.array([-3, 6, -3])
        cp = np.cross(u, v)
        assert_equal(cp, z)
        cp = np.cross(v, u)
        assert_equal(cp, -z)

    def test_broadcasting(self):
        # Ticket #2624 (Trac #2032)
        u = np.tile([1, 2], (11, 1))
        v = np.tile([3, 4], (11, 1))
        z = -2
        assert_equal(np.cross(u, v), z)
        assert_equal(np.cross(v, u), -z)
        assert_equal(np.cross(u, u), 0)

        u = np.tile([1, 2], (11, 1)).T
        v = np.tile([3, 4, 5], (11, 1))
        z = np.tile([10, -5, -2], (11, 1))
        assert_equal(np.cross(u, v, axisa=0), z)
        assert_equal(np.cross(v, u.T), -z)
        assert_equal(np.cross(v, v), 0)

        u = np.tile([1, 2, 3], (11, 1)).T
        v = np.tile([3, 4], (11, 1)).T
        z = np.tile([-12, 9, -2], (11, 1))
        assert_equal(np.cross(u, v, axisa=0, axisb=0), z)
        assert_equal(np.cross(v.T, u.T), -z)
        assert_equal(np.cross(u.T, u.T), 0)

        u = np.tile([1, 2, 3], (5, 1))
        v = np.tile([4, 5, 6], (5, 1)).T
        z = np.tile([-3, 6, -3], (5, 1))
        assert_equal(np.cross(u, v, axisb=0), z)
        assert_equal(np.cross(v.T, u), -z)
        assert_equal(np.cross(u, u), 0)

    def test_broadcasting_shapes(self):
        u = np.ones((2, 1, 3))
        v = np.ones((5, 3))
        assert_equal(np.cross(u, v).shape, (2, 5, 3))
        u = np.ones((10, 3, 5))
        v = np.ones((2, 5))
        assert_equal(np.cross(u, v, axisa=1, axisb=0).shape, (10, 5, 3))
        assert_raises(ValueError, np.cross, u, v, axisa=1, axisb=2)
        assert_raises(ValueError, np.cross, u, v, axisa=3, axisb=0)
        u = np.ones((10, 3, 5, 7))
        v = np.ones((5, 7, 2))
        assert_equal(np.cross(u, v, axisa=1, axisc=2).shape, (10, 5, 3, 7))
        assert_raises(ValueError, np.cross, u, v, axisa=-5, axisb=2)
        assert_raises(ValueError, np.cross, u, v, axisa=1, axisb=-4)
        # gh-5885
        u = np.ones((3, 4, 2))
        for axisc in range(-2, 2):
            assert_equal(np.cross(u, u, axisc=axisc).shape, (3, 4))


def test_outer_out_param():
    arr1 = np.ones((5,))
    arr2 = np.ones((2,))
    arr3 = np.linspace(-2, 2, 5)
    out1 = np.ndarray(shape=(5, 5))
    out2 = np.ndarray(shape=(2, 5))
    res1 = np.outer(arr1, arr3, out1)
    assert_equal(res1, out1)
    assert_equal(np.outer(arr2, arr3, out2), out2)


class TestRequire(object):
    flag_names = ['C', 'C_CONTIGUOUS', 'CONTIGUOUS',
                  'F', 'F_CONTIGUOUS', 'FORTRAN',
                  'A', 'ALIGNED',
                  'W', 'WRITEABLE',
                  'O', 'OWNDATA']

    def generate_all_false(self, dtype):
        arr = np.zeros((2, 2), [('junk', 'i1'), ('a', dtype)])
        arr.setflags(write=False)
        a = arr['a']
        assert_(not a.flags['C'])
        assert_(not a.flags['F'])
        assert_(not a.flags['O'])
        assert_(not a.flags['W'])
        assert_(not a.flags['A'])
        return a

    def set_and_check_flag(self, flag, dtype, arr):
        if dtype is None:
            dtype = arr.dtype
        b = np.require(arr, dtype, [flag])
        assert_(b.flags[flag])
        assert_(b.dtype == dtype)

        # a further call to np.require ought to return the same array
        # unless OWNDATA is specified.
        c = np.require(b, None, [flag])
        if flag[0] != 'O':
            assert_(c is b)
        else:
            assert_(c.flags[flag])

    def test_require_each(self):
        id = ['f8', 'i4']
        fd = [None, 'f8', 'c16']
        for idtype, fdtype, flag in itertools.product(id, fd, self.flag_names):
            a = self.generate_all_false(idtype)
            yield self.set_and_check_flag, flag, fdtype, a

    def test_unknown_requirement(self):
        a = self.generate_all_false('f8')
        assert_raises(KeyError, np.require, a, None, 'Q')

    def test_non_array_input(self):
        a = np.require([1, 2, 3, 4], 'i4', ['C', 'A', 'O'])
        assert_(a.flags['O'])
        assert_(a.flags['C'])
        assert_(a.flags['A'])
        assert_(a.dtype == 'i4')
        assert_equal(a, [1, 2, 3, 4])

    def test_C_and_F_simul(self):
        a = self.generate_all_false('f8')
        assert_raises(ValueError, np.require, a, None, ['C', 'F'])

    def test_ensure_array(self):
        class ArraySubclass(np.ndarray):
            pass

        a = ArraySubclass((2, 2))
        b = np.require(a, None, ['E'])
        assert_(type(b) is np.ndarray)

    def test_preserve_subtype(self):
        class ArraySubclass(np.ndarray):
            pass

        for flag in self.flag_names:
            a = ArraySubclass((2, 2))
            yield self.set_and_check_flag, flag, None, a


class TestBroadcast(TestCase):
    def test_broadcast_in_args(self):
        # gh-5881
        arrs = [np.empty((6, 7)), np.empty((5, 6, 1)), np.empty((7,)),
                np.empty((5, 1, 7))]
        mits = [np.broadcast(*arrs),
                np.broadcast(np.broadcast(*arrs[:2]), np.broadcast(*arrs[2:])),
                np.broadcast(arrs[0], np.broadcast(*arrs[1:-1]), arrs[-1])]
        for mit in mits:
            assert_equal(mit.shape, (5, 6, 7))
            assert_equal(mit.nd, 3)
            assert_equal(mit.numiter, 4)
            for a, ia in zip(arrs, mit.iters):
                assert_(a is ia.base)

    def test_number_of_arguments(self):
        arr = np.empty((5,))
        for j in range(35):
            arrs = [arr] * j
            if j < 2 or j > 32:
                assert_raises(ValueError, np.broadcast, *arrs)
            else:
                mit = np.broadcast(*arrs)
                assert_equal(mit.numiter, j)


if __name__ == "__main__":
    run_module_suite()
mit
eval1749/elang
build/android/devil/android/sdk/aapt.py
7
1152
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""This module wraps the Android Asset Packaging Tool."""

import os

from devil.utils import cmd_helper
from pylib import constants

_AAPT_PATH = os.path.join(constants.ANDROID_SDK_TOOLS, 'aapt')


def _RunAaptCmd(args):
  """Runs an aapt command.

  Args:
    args: A list of arguments for aapt.

  Returns:
    The output of the command.
  """
  cmd = [_AAPT_PATH] + args
  status, output = cmd_helper.GetCmdStatusAndOutput(cmd)
  if status != 0:
    raise Exception('Failed running aapt command: "%s" with output "%s".' %
                    (' '.join(cmd), output))
  return output


def Dump(what, apk, assets=None):
  """Returns the output of the aapt dump command.

  Args:
    what: What you want to dump.
    apk: Path to apk you want to dump information for.
    assets: List of assets in apk you want to dump information for.
  """
  assets = assets or []
  if isinstance(assets, basestring):
    assets = [assets]
  return _RunAaptCmd(['dump', what, apk] + assets).splitlines()
apache-2.0
wawtechnologies/linux-kernel-3.14.51-catchwire-kalitap
Documentation/target/tcm_mod_builder.py
868
40692
#!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse

tcm_dir = ""

fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""

def tcm_mod_err(msg):
    print msg
    sys.exit(1)

def tcm_mod_create_module_subdir(fabric_mod_dir_var):
    if os.path.isdir(fabric_mod_dir_var) == True:
        return 1

    print "Creating fabric_mod_dir: " + fabric_mod_dir_var
    ret = os.mkdir(fabric_mod_dir_var)
    if ret:
        tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)

    return

def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f

    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
    buf += " u64 nport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
    buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += " struct se_node_acl se_node_acl;\n"
    buf += "};\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* FC lport target portal group tag for TCM */\n"
    buf += " u16 lport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
    buf += " struct " + fabric_mod_name + "_lport *lport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_lport {\n"
    buf += " /* SCSI protocol the lport is providing */\n"
    buf += " u8 lport_proto_id;\n"
    buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
    buf += " u64 lport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
    buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
    buf += " struct se_wwn lport_wwn;\n"
    buf += "};\n"

    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)

    p.close()

    fabric_mod_port = "lport"
    fabric_mod_init_port = "nport"

    return

def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f

    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
    buf += " u64 iport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
    buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += " struct se_node_acl se_node_acl;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* SAS port target portal group tag for TCM */\n"
    buf += " u16 tport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += " struct " + fabric_mod_name + "_tport *tport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += " /* SCSI protocol the tport is providing */\n"
    buf += " u8 tport_proto_id;\n"
    buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
    buf += " u64 tport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for SAS Target port */\n"
    buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += " struct se_wwn tport_wwn;\n"
    buf += "};\n"

    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)

    p.close()

    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"

    return

def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f

    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += " /* ASCII formatted InitiatorName */\n"
    buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += " struct se_node_acl se_node_acl;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* iSCSI target portal group tag for TCM */\n"
    buf += " u16 tport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += " struct " + fabric_mod_name + "_tport *tport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += " /* SCSI protocol the tport is providing */\n"
    buf += " u8 tport_proto_id;\n"
    buf += " /* ASCII formatted TargetName for IQN */\n"
    buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += " struct se_wwn tport_wwn;\n"
    buf += "};\n"

    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)

    p.close()

    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"

    return

def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
    if proto_ident == "FC":
        tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
    elif proto_ident == "SAS":
        tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
    elif proto_ident == "iSCSI":
        tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
    else:
        print "Unsupported proto_ident: " + proto_ident
        sys.exit(1)

    return

def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
    buf = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
    print "Writing file: " + f

    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    buf = "#include <linux/module.h>\n"
    buf += "#include <linux/moduleparam.h>\n"
    buf += "#include <linux/version.h>\n"
    buf += "#include <generated/utsrelease.h>\n"
    buf += "#include <linux/utsname.h>\n"
    buf += "#include <linux/init.h>\n"
    buf += "#include <linux/slab.h>\n"
    buf += "#include <linux/kthread.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/string.h>\n"
    buf += "#include <linux/configfs.h>\n"
    buf += "#include <linux/ctype.h>\n"
    buf += "#include <asm/unaligned.h>\n\n"
    buf += "#include <target/target_core_base.h>\n"
    buf += "#include <target/target_core_fabric.h>\n"
    buf += "#include <target/target_core_fabric_configfs.h>\n"
    buf += "#include <target/target_core_configfs.h>\n"
    buf += "#include <target/configfs_macros.h>\n\n"
    buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
    buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
    buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
    buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
    buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
    buf += " struct se_portal_group *se_tpg,\n"
    buf += " struct config_group *group,\n"
    buf += " const char *name)\n"
    buf += "{\n"
    buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
    buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"

    if proto_ident == "FC" or proto_ident == "SAS":
        buf += " u64 wwpn = 0;\n"

    buf += " u32 nexus_depth;\n\n"
    buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
    buf += " return ERR_PTR(-EINVAL); */\n"
    buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
    buf += " if (!se_nacl_new)\n"
    buf += " return ERR_PTR(-ENOMEM);\n"
    buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
    buf += " nexus_depth = 1;\n"
    buf += " /*\n"
    buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
    buf += " * when converting a NodeACL from demo mode -> explict\n"
    buf += " */\n"
    buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
    buf += " name, nexus_depth);\n"
    buf += " if (IS_ERR(se_nacl)) {\n"
    buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
    buf += " return se_nacl;\n"
    buf += " }\n"
    buf += " /*\n"
    buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
    buf += " */\n"
    buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"

    if proto_ident == "FC" or proto_ident == "SAS":
        buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
        buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"

    buf += " return se_nacl;\n"
    buf += "}\n\n"
    buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
    buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
    buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
    buf += " kfree(nacl);\n"
    buf += "}\n\n"
    buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
    buf += " struct se_wwn *wwn,\n"
    buf += " struct config_group *group,\n"
    buf += " const char *name)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
    buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
    buf += " unsigned long tpgt;\n"
    buf += " int ret;\n\n"
    buf += " if (strstr(name, \"tpgt_\") != name)\n"
    buf += " return ERR_PTR(-EINVAL);\n"
    buf += " if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
    buf += " return ERR_PTR(-EINVAL);\n\n"
    buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
    buf += " if (!tpg) {\n"
    buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
    buf += " return ERR_PTR(-ENOMEM);\n"
    buf += " }\n"
    buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
    buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
    buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
    buf += " &tpg->se_tpg, (void *)tpg,\n"
    buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
    buf += " if (ret < 0) {\n"
    buf += " kfree(tpg);\n"
    buf += " return NULL;\n"
    buf += " }\n"
    buf += " return &tpg->se_tpg;\n"
    buf += "}\n\n"
    buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
    buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
    buf += " core_tpg_deregister(se_tpg);\n"
    buf += " kfree(tpg);\n"
    buf += "}\n\n"
    buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
    buf += " struct target_fabric_configfs *tf,\n"
    buf += " struct config_group *group,\n"
    buf += " const char *name)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"

    if proto_ident == "FC" or proto_ident == "SAS":
        buf += " u64 wwpn = 0;\n\n"

    buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
    buf += " return ERR_PTR(-EINVAL); */\n\n"
    buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
    buf += " if (!" + fabric_mod_port + ") {\n"
    buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
    buf += " return ERR_PTR(-ENOMEM);\n"
    buf += " }\n"

    if proto_ident == "FC" or proto_ident == "SAS":
        buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
        buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"

    buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
    buf += "}\n\n"
    buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
    buf += " kfree(" + fabric_mod_port + ");\n"
    buf += "}\n\n"
    buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
    buf += " struct target_fabric_configfs *tf,\n"
    buf += " char *page)\n"
    buf += "{\n"
    buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
    buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
    buf += " utsname()->machine);\n"
    buf += "}\n\n"
    buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
    buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
    buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
    buf += " NULL,\n"
    buf += "};\n\n"
    buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
    buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
    buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
    buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
    buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
    buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
    buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
    buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
    buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
    buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
    buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
    buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
    buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
    buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
    buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
    buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
    buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
    buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
    buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
    buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
    buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
    buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
    buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
    buf += " .sess_get_initiator_sid = NULL,\n"
    buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
    buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
    buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
    buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
    buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
    buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
    buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
    buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
    buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
    buf += " /*\n"
    buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
    buf += " */\n"
    buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
    buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
    buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
    buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
    buf += " .fabric_post_link = NULL,\n"
    buf += " .fabric_pre_unlink = NULL,\n"
    buf += " .fabric_make_np = NULL,\n"
    buf += " .fabric_drop_np = NULL,\n"
    buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
    buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
    buf += "};\n\n"
    buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
    buf += "{\n"
    buf += " struct target_fabric_configfs *fabric;\n"
    buf += " int ret;\n\n"
    buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
    buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
    buf += " utsname()->machine);\n"
    buf += " /*\n"
    buf += " * Register the top level struct config_item_type with TCM core\n"
    buf += " */\n"
    buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
    buf += " if (IS_ERR(fabric)) {\n"
    buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
    buf += " return PTR_ERR(fabric);\n"
    buf += " }\n"
    buf += " /*\n"
    buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
    buf += " */\n"
    buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
    buf += " /*\n"
    buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
    buf += " */\n"
    buf += " fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
    buf += " fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;\n"
    buf += " fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
    buf += " fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;\n"
    buf += " fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
    buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
    buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
    buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
    buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
    buf += " /*\n"
    buf += " * Register the fabric for use within TCM\n"
    buf += " */\n"
    buf += " ret = target_fabric_configfs_register(fabric);\n"
    buf += " if (ret < 0) {\n"
    buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
    buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
    buf += " return ret;\n"
    buf += " }\n"
    buf += " /*\n"
    buf += " * Setup our local pointer to *fabric\n"
    buf += " */\n"
    buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
    buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
    buf += " return 0;\n"
    buf += "};\n\n"
    buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
    buf += "{\n"
    buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
    buf += " return;\n\n"
    buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
    buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
    buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
    buf += "};\n\n"
    buf += "static int __init " + fabric_mod_name + "_init(void)\n"
    buf += "{\n"
    buf += " int ret;\n\n"
    buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
    buf += " if (ret < 0)\n"
    buf += " return ret;\n\n"
    buf += " return 0;\n"
    buf += "};\n\n"
    buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
    buf += "{\n"
    buf += " " + fabric_mod_name + "_deregister_configfs();\n"
    buf += "};\n\n"
    buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
    buf += "MODULE_LICENSE(\"GPL\");\n"
    buf += "module_init(" + fabric_mod_name + "_init);\n"
    buf += "module_exit(" + fabric_mod_name + "_exit);\n"

    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)

    p.close()
    return

def tcm_mod_scan_fabric_ops(tcm_dir):
    fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"

    print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
    process_fo = 0;

    p = open(fabric_ops_api, 'r')

    line = p.readline()
    while line:
        if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
            line = p.readline()
            continue

        if process_fo == 0:
            process_fo = 1;
            line = p.readline()
            # Search for function pointer
            if not re.search('\(\*', line):
                continue

            fabric_ops.append(line.rstrip())
            continue

        line = p.readline()
        # Search for function pointer
        if not re.search('\(\*', line):
            continue

        fabric_ops.append(line.rstrip())

    p.close()
    return

def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
    buf = ""
    bufi = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
    print "Writing file: " + f

    p = open(f, 'w')
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
    print "Writing file: " + fi

    pi = open(fi, 'w')
    if not pi:
        tcm_mod_err("Unable to open file: " + fi)

    buf = "#include <linux/slab.h>\n"
    buf += "#include <linux/kthread.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/list.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/string.h>\n"
    buf += "#include <linux/ctype.h>\n"
    buf += "#include <asm/unaligned.h>\n"
    buf += "#include <scsi/scsi.h>\n"
    buf += "#include <scsi/scsi_host.h>\n"
    buf += "#include <scsi/scsi_device.h>\n"
    buf += "#include <scsi/scsi_cmnd.h>\n"
    buf += "#include <scsi/libfc.h>\n\n"
    buf += "#include <target/target_core_base.h>\n"
    buf += "#include <target/target_core_fabric.h>\n"
    buf += "#include <target/target_core_configfs.h>\n\n"
    buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
    buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
    buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += " return 1;\n"
    buf += "}\n\n"
    bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
    buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += " return 0;\n"
    buf += "}\n\n"
    bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"

    total_fabric_ops = len(fabric_ops)
    i = 0
    while i < total_fabric_ops:
        fo = fabric_ops[i]
        i += 1
        # print "fabric_ops: " + fo

        if re.search('get_fabric_name', fo):
            buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
            buf += "{\n"
            buf += " return \"" + fabric_mod_name[4:] + "\";\n"
            buf += "}\n\n"
            bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
            continue

        if re.search('get_fabric_proto_ident', fo):
            buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
            buf += " u8 proto_id;\n\n"
            buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
            if proto_ident == "FC":
                buf += " case SCSI_PROTOCOL_FCP:\n"
                buf += " default:\n"
                buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
                buf += " break;\n"
            elif proto_ident == "SAS":
                buf += " case SCSI_PROTOCOL_SAS:\n"
                buf += " default:\n"
                buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
                buf += " break;\n"
            elif proto_ident == "iSCSI":
                buf += " case SCSI_PROTOCOL_ISCSI:\n"
                buf += " default:\n"
                buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
                buf += " break;\n"

            buf += " }\n\n"
            buf += " return proto_id;\n"
            buf += "}\n\n"
            bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"

        if re.search('get_wwn', fo):
            buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
            buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
            buf += "}\n\n"
            bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"

        if re.search('get_tag', fo):
            buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
            buf += "}\n\n"
            bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"

        if re.search('get_default_depth', fo):
            buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " return 1;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"

        if re.search('get_pr_transport_id\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
            buf += " struct se_portal_group *se_tpg,\n"
            buf += " struct se_node_acl *se_nacl,\n"
            buf += " struct t10_pr_registration *pr_reg,\n"
            buf += " int *format_code,\n"
            buf += " unsigned char *buf)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
            buf += " int ret = 0;\n\n"
            buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
            if proto_ident == "FC":
                buf += " case SCSI_PROTOCOL_FCP:\n"
                buf += " default:\n"
                buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
                buf += " format_code, buf);\n"
                buf += " break;\n"
            elif proto_ident == "SAS":
                buf += " case SCSI_PROTOCOL_SAS:\n"
                buf += " default:\n"
                buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
                buf += " format_code, buf);\n"
                buf += " break;\n"
            elif proto_ident == "iSCSI":
                buf += " case SCSI_PROTOCOL_ISCSI:\n"
                buf += " default:\n"
                buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
                buf += " format_code, buf);\n"
                buf += " break;\n"

            buf += " }\n\n"
            buf += " return ret;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
            bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
            bufi += " int *, unsigned char *);\n"

        if re.search('get_pr_transport_id_len\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
            buf += " struct se_portal_group *se_tpg,\n"
            buf += " struct se_node_acl *se_nacl,\n"
            buf += " struct t10_pr_registration *pr_reg,\n"
            buf += " int *format_code)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
            buf += " int ret = 0;\n\n"
            buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
            if proto_ident == "FC":
                buf += " case SCSI_PROTOCOL_FCP:\n"
                buf += " default:\n"
                buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
                buf += " format_code);\n"
                buf += " break;\n"
            elif proto_ident == "SAS":
                buf += " case SCSI_PROTOCOL_SAS:\n"
                buf += " default:\n"
                buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
                buf += " format_code);\n"
                buf += " break;\n"
            elif proto_ident == "iSCSI":
                buf += " case SCSI_PROTOCOL_ISCSI:\n"
                buf += " default:\n"
                buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
                buf += " format_code);\n"
                buf += " break;\n"

            buf += " }\n\n"
            buf += " return ret;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
            bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
            bufi += " int *);\n"

        if re.search('parse_pr_out_transport_id\)\(', fo):
            buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
            buf += " struct se_portal_group *se_tpg,\n"
            buf += " const char *buf,\n"
            buf += " u32 *out_tid_len,\n"
            buf += " char **port_nexus_ptr)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
            buf += " char *tid = NULL;\n\n"
            buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
            if proto_ident == "FC":
                buf += " case SCSI_PROTOCOL_FCP:\n"
                buf += " default:\n"
                buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
                buf += " port_nexus_ptr);\n"
            elif proto_ident == "SAS":
                buf += " case SCSI_PROTOCOL_SAS:\n"
                buf += " default:\n"
                buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
                buf += " port_nexus_ptr);\n"
            elif proto_ident == "iSCSI":
                buf += " case SCSI_PROTOCOL_ISCSI:\n"
                buf += " default:\n"
                buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
                buf += " port_nexus_ptr);\n"

            buf += " }\n\n"
            buf += " return tid;\n"
            buf += "}\n\n"
            bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
            bufi += " const char *, u32 *, char **);\n"

        if re.search('alloc_fabric_acl\)\(', fo):
            buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
            buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
            buf += " if (!nacl) {\n"
            buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
            buf += " return NULL;\n"
            buf += " }\n\n"
            buf += " return &nacl->se_node_acl;\n"
            buf += "}\n\n"
            bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"

        if re.search('release_fabric_acl\)\(', fo):
            buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
            buf += " struct se_portal_group *se_tpg,\n"
            buf += " struct se_node_acl *se_nacl)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
            buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
            buf += " kfree(nacl);\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
            bufi += " struct se_node_acl *);\n"

        if re.search('tpg_get_inst_index\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " return 1;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"

        if re.search('\*release_cmd\)\(', fo):
            buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"

        if re.search('shutdown_session\)\(', fo):
            buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"

        if re.search('close_session\)\(', fo):
            buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"

        if re.search('stop_session\)\(', fo):
            buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"

        if re.search('fall_back_to_erl0\)\(', fo):
            buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"

        if re.search('sess_logged_in\)\(', fo):
            buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"

        if re.search('sess_get_index\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"

        if re.search('write_pending\)\(', fo):
            buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"

        if re.search('write_pending_status\)\(', fo):
            buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"

        if re.search('set_default_node_attributes\)\(', fo):
            buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"

        if re.search('get_task_tag\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"

        if re.search('get_cmd_state\)\(', fo):
            buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"

        if re.search('queue_data_in\)\(', fo):
            buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"

        if re.search('queue_status\)\(', fo):
            buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"

        if re.search('queue_tm_rsp\)\(', fo):
            buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"

        if re.search('is_state_remove\)\(', fo):
            buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"

    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)

    p.close()

    ret = pi.write(bufi)
    if ret:
        tcm_mod_err("Unable to write fi: " + fi)

    pi.close()
    return

def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
    buf = ""
    f = fabric_mod_dir_var + "/Makefile"
    print "Writing file: " + f

    p = open(f, 'w')
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
    buf += " " + fabric_mod_name + "_configfs.o\n"
    buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"

    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)

    p.close()
    return

def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
    buf = ""
    f = fabric_mod_dir_var + "/Kconfig"
    print "Writing file: " + f

    p = open(f, 'w')
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    buf = "config " + fabric_mod_name.upper() + "\n"
    buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
    buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
    buf += " default n\n"
    buf += " ---help---\n"
    buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"

    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)

    p.close()
    return

def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
    buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
    kbuild = tcm_dir + "/drivers/target/Makefile"

    f = open(kbuild, 'a')
    f.write(buf)
    f.close()
    return

def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
    buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
    kconfig = tcm_dir + "/drivers/target/Kconfig"

    f = open(kconfig, 'a')
    f.write(buf)
    f.close()
    return

def main(modname, proto_ident):
    # proto_ident = "FC"
    # proto_ident = "SAS"
    # proto_ident = "iSCSI"
    tcm_dir = os.getcwd();
    tcm_dir += "/../../"
    print "tcm_dir: " + tcm_dir
    fabric_mod_name = modname
    fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
    print "Set fabric_mod_name: " + fabric_mod_name
    print "Set fabric_mod_dir: " + fabric_mod_dir
    print "Using proto_ident: " + proto_ident

    if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
        print "Unsupported proto_ident: " + proto_ident
        sys.exit(1)

    ret = tcm_mod_create_module_subdir(fabric_mod_dir)
    if ret:
        print "tcm_mod_create_module_subdir() failed because module already exists!"
        sys.exit(1)

    tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_scan_fabric_ops(tcm_dir)
    tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)

    input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
    if input == "yes" or input == "y":
        tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)

    input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
    if input == "yes" or input == "y":
        tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)

    return

parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
                  action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
                  action='store', nargs=1, type='string')

(opts, args) = parser.parse_args()

mandatories = ['modname', 'protoident']
for m in mandatories:
    if not opts.__dict__[m]:
        print "mandatory option is missing\n"
        parser.print_help()
        exit(-1)

if __name__ == "__main__":
    main(str(opts.modname), opts.protoident)
gpl-2.0
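The tcm_mod_builder generator above assembles every C stub by chaining `buf +=` appends for the definition and `bufi +=` appends for the matching prototype. A minimal sketch of that buf/bufi pattern using textwrap.dedent; `make_stub` is a hypothetical helper for illustration, not part of the kernel script:

import textwrap

def make_stub(fabric_mod_name, ret_type, func, args):
    # Emit a trivial C stub definition (buf) and its prototype (bufi),
    # mirroring how tcm_mod_dump_fabric_ops builds its two buffers.
    body = textwrap.dedent("""\
        %(ret)s %(mod)s_%(func)s(%(args)s)
        {
        \treturn 0;
        }

        """) % {'ret': ret_type, 'mod': fabric_mod_name, 'func': func, 'args': args}
    proto = "%s %s_%s(%s);\n" % (ret_type, fabric_mod_name, func, args)
    return body, proto

buf, bufi = make_stub("tcm_foo", "int", "write_pending", "struct se_cmd *se_cmd")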
scramblingbalam/Alta_Real
tweet_dumper.py
1
2119
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 28 15:18:19 2017

@author: yanofsky from https://gist.github.com/yanofsky/5436496
"""

#!/usr/bin/env python
# encoding: utf-8

import tweepy #https://github.com/tweepy/tweepy
import csv

#Twitter API credentials
consumer_key = "hGqgNKnozGGUZB3IyW6Noheky"
consumer_secret = "MjZAkFlsOzDdikPO5HPoNNjsa6FF7pvx99RIgGWxpSNbGRcjti"
access_key = "228503532-B6Y7fDmPVP1ppMLG57MJs8jpYfXVMycshIYj4oCc"
access_secret = "OTZ7DtmjMqNuChSK5VDNqoqVDOXtVMQnjUTDAYwXVqAAt"


def get_all_tweets(screen_name):
    #Twitter only allows access to a user's most recent 3240 tweets with this method

    #authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    #initialize a list to hold all the tweepy Tweets
    alltweets = []

    #make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name = screen_name,count=200)

    #save most recent tweets
    alltweets.extend(new_tweets)

    #save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    #keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print "getting tweets before %s" % (oldest)

        #all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name = screen_name,count=200,max_id=oldest)

        #save most recent tweets
        alltweets.extend(new_tweets)

        #update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print "...%s tweets downloaded so far" % (len(alltweets))

    #transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text.encode("utf-8")] for tweet in alltweets]

    #write the csv
    with open('%s_tweets.csv' % screen_name, 'wb') as f:
        writer = csv.writer(f)
        writer.writerow(["id","created_at","text"])
        writer.writerows(outtweets)

    pass


if __name__ == '__main__':
    #pass in the username of the account you want to download
    get_all_tweets("J_tsar")
mit
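The manual max_id loop above can also be expressed with tweepy's Cursor pagination helper. A hedged sketch, assuming the same auth objects built from the credentials above (the roughly 3200-tweet API cap still applies):

import tweepy

auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth)

# Cursor transparently re-issues user_timeline calls with max_id,
# equivalent to the hand-rolled while loop in tweet_dumper.py.
alltweets = [tweet for tweet in
             tweepy.Cursor(api.user_timeline, screen_name="J_tsar", count=200).items()]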
vmrob/needy
tests/unit/test_process.py
3
1049
import os
import subprocess
import sys
import unittest

import needy.process


class ProcessTest(unittest.TestCase):
    def test_list_command_output(self):
        self.assertEqual('hello', needy.process.command_output([sys.executable, '-c', 'print(\'hello\')']).strip())

    def test_shell_command_output(self):
        if sys.platform == 'win32':
            self.assertEqual(os.getcwd(), needy.process.command_output('echo %CD%').strip())
        else:
            self.assertEqual('hello', needy.process.command_output('printf `printf hello`'))

    def test_command_sequence(self):
        if sys.platform == 'win32':
            needy.process.command_sequence(['set FOO=QWERTYUIOP', 'echo %FOO% | findstr "QWERTYUIOP"'])
        else:
            needy.process.command_sequence(['export FOO=QWERTYUIOP', 'echo $FOO | grep "QWERTYUIOP"'])

    def test_command_sequence_failure(self):
        with self.assertRaises(subprocess.CalledProcessError) as a:
            needy.process.command_sequence(['notacommand123123', 'alsonotacommand321'])
mit
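The tests exercise needy.process.command_output with both an argv list and a shell string. A minimal stand-in showing the behaviour they assume; this is a sketch, not needy's actual implementation:

import subprocess

def command_output(command):
    # Accept either an argv list or a shell string, mirroring the two
    # call forms the tests above exercise (assumption: needy's real
    # implementation may differ in details such as encoding handling).
    shell = isinstance(command, str)
    return subprocess.check_output(command, shell=shell).decode()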
tmimori/frappe
frappe/email/doctype/auto_email_report/test_auto_email_report.py
17
1091
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals

import frappe
import unittest, json

# test_records = frappe.get_test_records('Auto Email Report')

class TestAutoEmailReport(unittest.TestCase):
    def test_auto_email(self):
        frappe.delete_doc('Auto Email Report', 'Permitted Documents For User')

        auto_email_report = frappe.get_doc(dict(
            doctype='Auto Email Report',
            report='Permitted Documents For User',
            report_type='Script Report',
            user='Administrator',
            enabled=1,
            email_to='test@example.com',
            format='HTML',
            frequency='Daily',
            filters=json.dumps(dict(user='Administrator', doctype='DocType'))
        )).insert()

        data = auto_email_report.get_report_content()
        self.assertTrue('<td>DocShare</td>' in data)
        self.assertTrue('<td>Core</td>' in data)

        auto_email_report.format = 'CSV'
        data = auto_email_report.get_report_content()
        self.assertTrue('"Language","Core"' in data)

        auto_email_report.format = 'XLSX'
        data = auto_email_report.get_report_content()
mit
deeice/bunjalloo
waf_tools/devkitarm.py
5
2313
#!/usr/bin/python
import os

def get_devkitarm_bin():
    try:
        return os.path.join(os.environ['DEVKITARM'], 'bin')
    except KeyError:
        return ''

def detect(conf):
    from Logs import warn
    if 'DEVKITARM' not in os.environ:
        warn("`DEVKITARM' variable is not set, compiler may not be found")

    def prog(name, var=None):
        arm_eabi = 'arm-eabi-%s'
        if not var:
            var = name
        var = var.upper()
        exe = conf.find_program(arm_eabi % name, path_list=[get_devkitarm_bin()])
        if exe:
            conf.env[var] = exe

    prog('ar')
    prog('cpp')
    prog('gcc', 'cc')
    prog('g++', 'cxx')
    prog('g++', 'link_cxx')
    prog('objcopy')
    prog('ranlib')

    # update the compiler flags
    dkp = '/'
    try:
        dkp = os.environ['DEVKITPRO']
    except KeyError:
        warn("`DEVKITPRO' variable is not set, libraries may not be found")

    ARCH = '-mthumb -mthumb-interwork'.split()
    CFLAGS = '''-O2 -ffast-math -Wall -march=armv5te -mtune=arm946e-s
                -Wno-array-bounds -fomit-frame-pointer'''.split()
    CFLAGS += ARCH
    CXXFLAGS = CFLAGS + '-fno-rtti -fno-exceptions'.split()
    LINKFLAGS = '''-specs=ds_arm9.specs -Wl,-Map,map9.map -Wl,-gc-sections'''.split()
    LINKFLAGS += ARCH
    OBJCOPYFLAGS = ''' -I binary -O elf32-littlearm -B arm
                   --rename-section .data=.rodata,readonly,contents '''.split()
    CCDEFINES = ['ARM9']
    libnds = os.path.join(dkp, 'libnds')
    env = conf.env
    flags = {
        'CXXFLAGS': CXXFLAGS,
        'CCFLAGS': CFLAGS,
        'CCDEFINES': CCDEFINES,
        'CXXDEFINES': CCDEFINES,
        'CPPPATH': [os.path.join(libnds, 'include')],
        'LIBPATH': [os.path.join(libnds, 'lib')],
        'LINKFLAGS': LINKFLAGS,
        'OBJCOPYFLAGS': OBJCOPYFLAGS,
        'DEVKITARM': os.getenv('DEVKITARM'),
        'DEVKITPRO': os.getenv('DEVKITPRO'),
    }
    env.update(flags)

try:
    from Configure import conf

    @conf
    def check_libnds(self):
        # need -lfat -lnds9 in that order, but fat requires nds9...
        self.check(lib='nds9', uselib_store='NDS', mandatory=True)
        self.check(lib='fat', uselib='NDS', mandatory=True)
        self.env.prepend_value('LIB_NDS', 'fat')
except ImportError:
    pass
gpl-3.0
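prog() above delegates to waf's conf.find_program, which searches a path list for the arm-eabi-* binaries. Outside waf, the same lookup can be approximated with shutil.which (Python 3.3+); this is a rough stand-in for illustration, not the waf API:

import os
import shutil

def find_arm_eabi(name, devkitarm=os.environ.get('DEVKITARM', '')):
    # Look for e.g. arm-eabi-gcc in $DEVKITARM/bin first, then fall back
    # to $PATH, roughly what conf.find_program does in the tool above.
    search = os.path.join(devkitarm, 'bin') + os.pathsep + os.environ.get('PATH', '')
    return shutil.which('arm-eabi-%s' % name, path=search)

print(find_arm_eabi('gcc'))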
horance-liu/tensorflow
tensorflow/contrib/learn/python/learn/datasets/synthetic.py
120
6827
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Synthetic dataset generators."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.contrib.learn.python.learn.datasets.base import Dataset


def circles(n_samples=100, noise=None, seed=None, factor=0.8, n_classes=2,
            *args, **kwargs):
  """Create circles separated by some value

  Args:
    n_samples: int, number of datapoints to generate
    noise: float or None, standard deviation of the Gaussian noise added
    seed: int or None, seed for the noise
    factor: float, size factor of the inner circles with respect to the outer ones
    n_classes: int, number of classes to generate

  Returns:
    Shuffled features and labels for 'circles' synthetic dataset of type
    `base.Dataset`

  Note:
    The multi-class support might not work as expected if `noise` is enabled

  TODO:
    - Generation of unbalanced data

  Credit goes to (under BSD 3 clause):
    B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel, G. Louppe,
    J. Nothman
  """
  if seed is not None:
    np.random.seed(seed)
  # Algo: 1) Generate initial circle, 2) For every class generate a smaller radius circle
  linspace = np.linspace(0, 2*np.pi, n_samples // n_classes)
  circ_x = np.empty(0, dtype=np.int32)
  circ_y = np.empty(0, dtype=np.int32)
  base_cos = np.cos(linspace)
  base_sin = np.sin(linspace)

  y = np.empty(0, dtype=np.int32)
  for label in range(n_classes):
    circ_x = np.append(circ_x, base_cos)
    circ_y = np.append(circ_y, base_sin)
    base_cos *= factor
    base_sin *= factor
    y = np.append(y, label*np.ones(n_samples // n_classes, dtype=np.int32))

  # Add more points if n_samples is not divisible by n_classes (unbalanced!)
  extras = n_samples % n_classes
  circ_x = np.append(circ_x, np.cos(np.random.rand(extras)*2*np.pi))
  circ_y = np.append(circ_y, np.sin(np.random.rand(extras)*2*np.pi))
  y = np.append(y, np.zeros(extras, dtype=np.int32))

  # Reshape the features/labels
  X = np.vstack((circ_x, circ_y)).T
  y = np.hstack(y)

  # Shuffle the data
  indices = np.random.permutation(range(n_samples))
  if noise is not None:
    X += np.random.normal(scale=noise, size=X.shape)
  return Dataset(data=X[indices], target=y[indices])


def spirals(n_samples=100, noise=None, seed=None,
            mode='archimedes', n_loops=2, *args, **kwargs):
  """Create spirals

  Currently only binary classification is supported for spiral generation

  Args:
    n_samples: int, number of datapoints to generate
    noise: float or None, standard deviation of the Gaussian noise added
    seed: int or None, seed for the noise
    n_loops: int, number of spiral loops, doesn't play well with 'bernoulli'
    mode: str, how the spiral should be generated. Current implementations:
      'archimedes': a spiral with equal distances between branches
      'bernoulli': logarithmic spiral with branch distances increasing
      'fermat': a spiral with branch distances decreasing (sqrt)

  Returns:
    Shuffled features and labels for 'spirals' synthetic dataset of type
    `base.Dataset`

  Raises:
    ValueError: If the generation `mode` is not valid

  TODO:
    - Generation of unbalanced data
  """
  n_classes = 2  # I am not sure how to make it multiclass

  _modes = {
      'archimedes': _archimedes_spiral,
      'bernoulli': _bernoulli_spiral,
      'fermat': _fermat_spiral
  }

  if mode is None or mode not in _modes:
    raise ValueError("Cannot generate spiral with mode %s" % mode)

  if seed is not None:
    np.random.seed(seed)
  linspace = np.linspace(0, 2*n_loops*np.pi, n_samples // n_classes)
  spir_x = np.empty(0, dtype=np.int32)
  spir_y = np.empty(0, dtype=np.int32)

  y = np.empty(0, dtype=np.int32)
  for label in range(n_classes):
    base_cos, base_sin = _modes[mode](linspace, label*np.pi, *args, **kwargs)
    spir_x = np.append(spir_x, base_cos)
    spir_y = np.append(spir_y, base_sin)
    y = np.append(y, label*np.ones(n_samples // n_classes, dtype=np.int32))

  # Add more points if n_samples is not divisible by n_classes (unbalanced!)
  extras = n_samples % n_classes
  if extras > 0:
    # Fixed a NameError in the original: the tuple was unpacked as
    # "x_exrta" but used below as "x_extra".
    x_extra, y_extra = _modes[mode](np.random.rand(extras)*2*np.pi,
                                    *args, **kwargs)
    spir_x = np.append(spir_x, x_extra)
    spir_y = np.append(spir_y, y_extra)
    y = np.append(y, np.zeros(extras, dtype=np.int32))

  # Reshape the features/labels
  X = np.vstack((spir_x, spir_y)).T
  y = np.hstack(y)

  # Shuffle the data
  indices = np.random.permutation(range(n_samples))
  if noise is not None:
    X += np.random.normal(scale=noise, size=X.shape)
  return Dataset(data=X[indices], target=y[indices])


def _archimedes_spiral(theta, theta_offset=0., *args, **kwargs):
  """Return Archimedes spiral

  Args:
    theta: array-like, angles from polar coordinates to be converted
    theta_offset: float, angle offset in radians (2*pi = 0)
  """
  x, y = theta*np.cos(theta + theta_offset), theta*np.sin(theta + theta_offset)
  x_norm = np.max(np.abs(x))
  y_norm = np.max(np.abs(y))
  x, y = x / x_norm, y / y_norm
  return x, y


def _bernoulli_spiral(theta, theta_offset=0., *args, **kwargs):
  """Return Equiangular (Bernoulli's) spiral

  Args:
    theta: array-like, angles from polar coordinates to be converted
    theta_offset: float, angle offset in radians (2*pi = 0)

  Kwargs:
    exp_scale: growth rate of the exponential
  """
  exp_scale = kwargs.pop('exp_scale', 0.1)

  x, y = np.exp(exp_scale*theta)*np.cos(theta + theta_offset), \
         np.exp(exp_scale*theta)*np.sin(theta + theta_offset)
  x_norm = np.max(np.abs(x))
  y_norm = np.max(np.abs(y))
  x, y = x / x_norm, y / y_norm
  return x, y


def _fermat_spiral(theta, theta_offset=0., *args, **kwargs):
  """Return Parabolic (Fermat's) spiral

  Args:
    theta: array-like, angles from polar coordinates to be converted
    theta_offset: float, angle offset in radians (2*pi = 0)
  """
  x, y = np.sqrt(theta)*np.cos(theta + theta_offset), \
         np.sqrt(theta)*np.sin(theta + theta_offset)
  x_norm = np.max(np.abs(x))
  y_norm = np.max(np.abs(y))
  x, y = x / x_norm, y / y_norm
  return x, y
apache-2.0
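A short usage sketch for the two generators above, assuming the module is importable under the repository path shown; Dataset is a namedtuple with data and target fields:

from tensorflow.contrib.learn.python.learn.datasets.synthetic import circles, spirals

# 100 noisy points on two concentric circles, then two interleaved spirals.
circ = circles(n_samples=100, noise=0.05, seed=42, factor=0.5)
spir = spirals(n_samples=100, noise=0.05, seed=42, mode='fermat', n_loops=2)
print(circ.data.shape, circ.target.shape)   # (100, 2) (100,)
print(spir.data.shape, spir.target.shape)   # (100, 2) (100,)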
rrohan/scikit-learn
examples/cluster/plot_kmeans_stability_low_dim_dense.py
338
4324
""" ============================================================ Empirical evaluation of the impact of k-means initialization ============================================================ Evaluate the ability of k-means initializations strategies to make the algorithm convergence robust as measured by the relative standard deviation of the inertia of the clustering (i.e. the sum of distances to the nearest cluster center). The first plot shows the best inertia reached for each combination of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method (``init="random"`` or ``init="kmeans++"``) for increasing values of the ``n_init`` parameter that controls the number of initializations. The second plot demonstrate one single run of the ``MiniBatchKMeans`` estimator using a ``init="random"`` and ``n_init=1``. This run leads to a bad convergence (local optimum) with estimated centers stuck between ground truth clusters. The dataset used for evaluation is a 2D grid of isotropic Gaussian clusters widely spaced. """ print(__doc__) # Author: Olivier Grisel <olivier.grisel@ensta.org> # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt import matplotlib.cm as cm from sklearn.utils import shuffle from sklearn.utils import check_random_state from sklearn.cluster import MiniBatchKMeans from sklearn.cluster import KMeans random_state = np.random.RandomState(0) # Number of run (with randomly generated dataset) for each strategy so as # to be able to compute an estimate of the standard deviation n_runs = 5 # k-means models can do several random inits so as to be able to trade # CPU time for convergence robustness n_init_range = np.array([1, 5, 10, 15, 20]) # Datasets generation parameters n_samples_per_center = 100 grid_size = 3 scale = 0.1 n_clusters = grid_size ** 2 def make_data(random_state, n_samples_per_center, grid_size, scale): random_state = check_random_state(random_state) centers = np.array([[i, j] for i in range(grid_size) for j in range(grid_size)]) n_clusters_true, n_features = centers.shape noise = random_state.normal( scale=scale, size=(n_samples_per_center, centers.shape[1])) X = np.concatenate([c + noise for c in centers]) y = np.concatenate([[i] * n_samples_per_center for i in range(n_clusters_true)]) return shuffle(X, y, random_state=random_state) # Part 1: Quantitative evaluation of various init methods fig = plt.figure() plots = [] legends = [] cases = [ (KMeans, 'k-means++', {}), (KMeans, 'random', {}), (MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}), (MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}), ] for factory, init, params in cases: print("Evaluation of %s with %s init" % (factory.__name__, init)) inertia = np.empty((len(n_init_range), n_runs)) for run_id in range(n_runs): X, y = make_data(run_id, n_samples_per_center, grid_size, scale) for i, n_init in enumerate(n_init_range): km = factory(n_clusters=n_clusters, init=init, random_state=run_id, n_init=n_init, **params).fit(X) inertia[i, run_id] = km.inertia_ p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1)) plots.append(p[0]) legends.append("%s with %s init" % (factory.__name__, init)) plt.xlabel('n_init') plt.ylabel('inertia') plt.legend(plots, legends) plt.title("Mean inertia for various k-means init across %d runs" % n_runs) # Part 2: Qualitative visual inspection of the convergence X, y = make_data(random_state, n_samples_per_center, grid_size, scale) km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1, 
random_state=random_state).fit(X) fig = plt.figure() for k in range(n_clusters): my_members = km.labels_ == k color = cm.spectral(float(k) / n_clusters, 1) plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color) cluster_center = km.cluster_centers_[k] plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=color, markeredgecolor='k', markersize=6) plt.title("Example cluster allocation with a single random init\n" "with MiniBatchKMeans") plt.show()
bsd-3-clause
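The measurement in Part 1 boils down to fitting the same data several times per n_init value and comparing the inertia spread. A condensed sketch of that core loop, using make_blobs as a stand-in for the script's own make_data:

import numpy as np
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=900, centers=9, cluster_std=0.1, random_state=0)
for n_init in (1, 5, 10):
    runs = [KMeans(n_clusters=9, init='random', n_init=n_init,
                   random_state=seed).fit(X).inertia_ for seed in range(5)]
    # More random restarts give a lower mean inertia and a smaller spread.
    print(n_init, np.mean(runs), np.std(runs))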
nivertech/libsnark
src/reductions/tbcs_to_uscs/get_tbcs_reduction.py
6
1708
#!/usr/bin/env python
##
# @author     This file is part of libsnark, developed by SCIPR Lab
#             and contributors (see AUTHORS).
# @copyright  MIT license (see LICENSE file)

from __future__ import division
import itertools

def valid_formula(truth_table, x_coeff, y_coeff, z_coeff, offset):
    for x in [0,1]:
        for y in [0,1]:
            z = truth_table[2*x + y]
            # we require that z can be set to the correct value, but can *not* be set to the incorrect one
            if ((x*x_coeff + y*y_coeff + z*z_coeff + offset not in [-1, 1]) or
                (x*x_coeff + y*y_coeff + (1-z)*z_coeff + offset in [-1, 1])):
                return False
    return True

def all_valid_formulas(truth_table, x_coeff_range, y_coeff_range, z_coeff_range, offset_range):
    for x_coeff, y_coeff, z_coeff, offset in itertools.product(x_coeff_range, y_coeff_range, z_coeff_range, offset_range):
        if valid_formula(truth_table, x_coeff, y_coeff, z_coeff, offset):
            yield x_coeff, y_coeff, z_coeff, offset

if __name__ == '__main__':
    x_coeff_range, y_coeff_range, z_coeff_range, offset_range = range(-2, 3), range(-2, 3), range(1, 5), range(-5, 6)
    print "Possible coefficients for x: %s, for y: %s, for z: %s, for offset: %s" % (x_coeff_range, y_coeff_range, z_coeff_range, offset_range)
    for truth_table in itertools.product([0, 1], repeat=4):
        print "Truth table (00, 01, 10, 11):", truth_table
        for x_coeff, y_coeff, z_coeff, offset in all_valid_formulas(truth_table, x_coeff_range, y_coeff_range, z_coeff_range, offset_range):
            print "    %s * x + %s * y + %s * z + %s \in {-1, 1}" % (x_coeff, y_coeff, z_coeff, offset)
mit
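As a worked example, for the AND truth table (00, 01, 10, 11) -> (0, 0, 0, 1) the search accepts the tuple (x_coeff, y_coeff, z_coeff, offset) = (-2, -2, 4, 1). A standalone re-check of the two-sided condition valid_formula enforces:

# -2*x + -2*y + 4*z + 1 lands in {-1, 1} exactly when z = x AND y;
# flipping z pushes the sum out of {-1, 1} on every row.
for x in (0, 1):
    for y in (0, 1):
        z = x & y
        assert -2*x + -2*y + 4*z + 1 in (-1, 1)
        assert -2*x + -2*y + 4*(1 - z) + 1 not in (-1, 1)
print("(-2, -2, 4, 1) is a valid gadget for AND")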
RobertABT/heightmap
build/scipy/scipy/linalg/decomp_cholesky.py
16
9462
"""Cholesky decomposition functions.""" from __future__ import division, print_function, absolute_import from numpy import asarray_chkfinite, asarray # Local imports from .misc import LinAlgError, _datacopied from .lapack import get_lapack_funcs __all__ = ['cholesky', 'cho_factor', 'cho_solve', 'cholesky_banded', 'cho_solve_banded'] def _cholesky(a, lower=False, overwrite_a=False, clean=True, check_finite=True): """Common code for cholesky() and cho_factor().""" if check_finite: a1 = asarray_chkfinite(a) else: a1 = asarray(a) if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]: raise ValueError('expected square matrix') overwrite_a = overwrite_a or _datacopied(a1, a) potrf, = get_lapack_funcs(('potrf',), (a1,)) c, info = potrf(a1, lower=lower, overwrite_a=overwrite_a, clean=clean) if info > 0: raise LinAlgError("%d-th leading minor not positive definite" % info) if info < 0: raise ValueError('illegal value in %d-th argument of internal potrf' % -info) return c, lower def cholesky(a, lower=False, overwrite_a=False, check_finite=True): """ Compute the Cholesky decomposition of a matrix. Returns the Cholesky decomposition, :math:`A = L L^*` or :math:`A = U^* U` of a Hermitian positive-definite matrix A. Parameters ---------- a : (M, M) array_like Matrix to be decomposed lower : bool Whether to compute the upper or lower triangular Cholesky factorization. Default is upper-triangular. overwrite_a : bool Whether to overwrite data in `a` (may improve performance). check_finite : boolean, optional Whether to check that the input matrix contains only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- c : (M, M) ndarray Upper- or lower-triangular Cholesky factor of `a`. Raises ------ LinAlgError : if decomposition fails. Examples -------- >>> from scipy import array, linalg, dot >>> a = array([[1,-2j],[2j,5]]) >>> L = linalg.cholesky(a, lower=True) >>> L array([[ 1.+0.j, 0.+0.j], [ 0.+2.j, 1.+0.j]]) >>> dot(L, L.T.conj()) array([[ 1.+0.j, 0.-2.j], [ 0.+2.j, 5.+0.j]]) """ c, lower = _cholesky(a, lower=lower, overwrite_a=overwrite_a, clean=True, check_finite=check_finite) return c def cho_factor(a, lower=False, overwrite_a=False, check_finite=True): """ Compute the Cholesky decomposition of a matrix, to use in cho_solve Returns a matrix containing the Cholesky decomposition, ``A = L L*`` or ``A = U* U`` of a Hermitian positive-definite matrix `a`. The return value can be directly used as the first parameter to cho_solve. .. warning:: The returned matrix also contains random data in the entries not used by the Cholesky decomposition. If you need to zero these entries, use the function `cholesky` instead. Parameters ---------- a : (M, M) array_like Matrix to be decomposed lower : boolean Whether to compute the upper or lower triangular Cholesky factorization (Default: upper-triangular) overwrite_a : boolean Whether to overwrite data in a (may improve performance) check_finite : boolean, optional Whether to check that the input matrix contains only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- c : (M, M) ndarray Matrix whose upper or lower triangle contains the Cholesky factor of `a`. Other parts of the matrix contain random data. 
lower : boolean Flag indicating whether the factor is in the lower or upper triangle Raises ------ LinAlgError Raised if decomposition fails. See also -------- cho_solve : Solve a linear set equations using the Cholesky factorization of a matrix. """ c, lower = _cholesky(a, lower=lower, overwrite_a=overwrite_a, clean=False, check_finite=check_finite) return c, lower def cho_solve(c_and_lower, b, overwrite_b=False, check_finite=True): """Solve the linear equations A x = b, given the Cholesky factorization of A. Parameters ---------- (c, lower) : tuple, (array, bool) Cholesky factorization of a, as given by cho_factor b : array Right-hand side check_finite : boolean, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- x : array The solution to the system A x = b See also -------- cho_factor : Cholesky factorization of a matrix """ (c, lower) = c_and_lower if check_finite: b1 = asarray_chkfinite(b) c = asarray_chkfinite(c) else: b1 = asarray(b) c = asarray(c) if c.ndim != 2 or c.shape[0] != c.shape[1]: raise ValueError("The factored matrix c is not square.") if c.shape[1] != b1.shape[0]: raise ValueError("incompatible dimensions.") overwrite_b = overwrite_b or _datacopied(b1, b) potrs, = get_lapack_funcs(('potrs',), (c, b1)) x, info = potrs(c, b1, lower=lower, overwrite_b=overwrite_b) if info != 0: raise ValueError('illegal value in %d-th argument of internal potrs' % -info) return x def cholesky_banded(ab, overwrite_ab=False, lower=False, check_finite=True): """ Cholesky decompose a banded Hermitian positive-definite matrix The matrix a is stored in ab either in lower diagonal or upper diagonal ordered form: ab[u + i - j, j] == a[i,j] (if upper form; i <= j) ab[ i - j, j] == a[i,j] (if lower form; i >= j) Example of ab (shape of a is (6,6), u=2):: upper form: * * a02 a13 a24 a35 * a01 a12 a23 a34 a45 a00 a11 a22 a33 a44 a55 lower form: a00 a11 a22 a33 a44 a55 a10 a21 a32 a43 a54 * a20 a31 a42 a53 * * Parameters ---------- ab : (u + 1, M) array_like Banded matrix overwrite_ab : boolean Discard data in ab (may enhance performance) lower : boolean Is the matrix in the lower form. (Default is upper form) check_finite : boolean, optional Whether to check that the input matrix contains only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- c : (u + 1, M) ndarray Cholesky factorization of a, in the same banded format as ab """ if check_finite: ab = asarray_chkfinite(ab) else: ab = asarray(ab) pbtrf, = get_lapack_funcs(('pbtrf',), (ab,)) c, info = pbtrf(ab, lower=lower, overwrite_ab=overwrite_ab) if info > 0: raise LinAlgError("%d-th leading minor not positive definite" % info) if info < 0: raise ValueError('illegal value in %d-th argument of internal pbtrf' % -info) return c def cho_solve_banded(cb_and_lower, b, overwrite_b=False, check_finite=True): """Solve the linear equations A x = b, given the Cholesky factorization of A. Parameters ---------- (cb, lower) : tuple, (array, bool) `cb` is the Cholesky factorization of A, as given by cholesky_banded. `lower` must be the same value that was given to cholesky_banded. b : array Right-hand side overwrite_b : bool If True, the function will overwrite the values in `b`. 
check_finite : boolean, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- x : array The solution to the system A x = b See also -------- cholesky_banded : Cholesky factorization of a banded matrix Notes ----- .. versionadded:: 0.8.0 """ (cb, lower) = cb_and_lower if check_finite: cb = asarray_chkfinite(cb) b = asarray_chkfinite(b) else: cb = asarray(cb) b = asarray(b) # Validate shapes. if cb.shape[-1] != b.shape[0]: raise ValueError("shapes of cb and b are not compatible.") pbtrs, = get_lapack_funcs(('pbtrs',), (cb, b)) x, info = pbtrs(cb, b, lower=lower, overwrite_b=overwrite_b) if info > 0: raise LinAlgError("%d-th leading minor not positive definite" % info) if info < 0: raise ValueError('illegal value in %d-th argument of internal pbtrs' % -info) return x
mit
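A typical use of the pair above: factor once with cho_factor, then reuse the factorization for each right-hand side with cho_solve:

import numpy as np
from scipy.linalg import cho_factor, cho_solve

A = np.array([[4., 2.], [2., 3.]])   # symmetric positive definite
b = np.array([6., 5.])
c, low = cho_factor(A)               # one factorization...
x = cho_solve((c, low), b)           # ...reused for each right-hand side
assert np.allclose(A.dot(x), b)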
certik/hermes1d
hermes_common/convert_api.py
9
1932
#! /usr/bin/env python

from time import strftime, localtime

def convert_h():
    f = open("_hermes_common_api_new.h", "w")
    f.write("/* Generated by convert_api.py on %s */\n\n" % \
            strftime("%a %b %d %H:%M:%S %Y", localtime()))
    lines = open("_hermes_common_api.h").readlines()
    line = lines[0]; f.write(line); del lines[0]
    line = lines[0]; f.write(line); del lines[0]
    f.write("""\n\
// To avoid compilation warnings:
#undef _XOPEN_SOURCE
#undef _POSIX_C_SOURCE
\n""")
    line = lines[0]
    while not line.startswith("static"):
        f.write(line)
        del lines[0]
        line = lines[0]
    f.write("#include <complex>\n")
    f.write("typedef ::std::complex<double> __pyx_t_double_complex;\n\n")
    while line.startswith("static"):
        line = line.replace("static", "extern")
        f.write(line)
        del lines[0]
        line = lines[0]
    f.write("""
extern int import__hermes_common(void);

#endif\n""")

def convert_cpp():
    f = open("_hermes_common_api_new.cpp", "w")
    f.write("/* Generated by convert_api.py on %s */\n\n" % \
            strftime("%a %b %d %H:%M:%S %Y", localtime()))
    lines = open("_hermes_common_api.h").readlines()
    line = lines[0]
    while not line.startswith("static"):
        del lines[0]
        line = lines[0]
    f.write("""\
#include "_hermes_common_api_new.h"
""")
    while line.startswith("static"):
        line = line.replace("static ", "")
        f.write(line)
        del lines[0]
        line = lines[0]
    f.write(line)
    line_old = line
    for line in lines:
        if line.startswith("#ifndef"):
            continue
        if line.startswith("#define"):
            continue
        if line.startswith("#endif"):
            if not line_old.startswith(" "):
                continue
        line = line.replace("static ", "")
        f.write(line)
        line_old = line

convert_h()
convert_cpp()
bsd-3-clause
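The essential transformation the script applies to each static declaration in the Cython-generated header, shown on a sample line (the identifier is illustrative):

line = "static PyObject *(*c_vector_int2numpy)(void *);\n"
print(line.replace("static", "extern"))   # header side: extern declarations
print(line.replace("static ", ""))        # .cpp side: plain definitions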
PaulWay/spacewalk
client/debian/packages-already-in-debian/rhn-client-tools/src/firstboot/rhn_start_gui.py
21
4456
# Copyright 2006--2010 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# Authors:
#     Jan Pazdziora jpazdziora at redhat dot com
#     Daniel Benamy <dbenamy@redhat.com>

import sys
sys.path.append("/usr/share/rhn")
from up2date_client import rhnreg
from up2date_client import rhnregGui

import gtk
from gtk import glade

import gettext
_ = lambda x: gettext.ldgettext("rhn-client-tools", x)
gtk.glade.bindtextdomain("rhn-client-tools")

from firstboot.module import Module
from firstboot.constants import *
from firstboot.loader import _haveNetwork


class moduleClass(Module):
    def __init__(self):
        Module.__init__(self)
        self.priority = 106
        self.sidebarTitle = _("Set Up Software Updates")
        self.title = _("Set Up Software Updates")
        self.skip_registration = False
        self.start_page_vbox = None

    def apply(self, interface, testing=False):
        if testing:
            return RESULT_SUCCESS
        if self.skip_registration:
            interface.moveToPage(pageNum = len(interface.moduleList))
            return RESULT_JUMP
        if not self.start_page.startPageRegisterNow():
            dlg = rhnregGui.ConfirmQuitDialog()
            if dlg.rc == 0:
                return RESULT_FAILURE
            else:
                interface.moveToPage(moduleTitle=_("Finish Updates Setup"))
                return RESULT_JUMP
        return RESULT_SUCCESS

    def createScreen(self):
        self.vbox = gtk.VBox(spacing=5)

    def initializeUI(self):
        if self.start_page_vbox:
            self.start_page_vbox.destroy()
        self.start_page_vbox = self._getVbox()
        self.vbox.pack_start(self.start_page_vbox, True, True)

    def _system_is_registered(self):
        if rhnreg.registered():
            return True
        try:
            _rhsm_path = "/usr/share/rhsm/subscription_manager"
            _rhsm_path_added = False
            if _rhsm_path not in sys.path:
                sys.path.append(_rhsm_path)
                _rhsm_path_added = True
            import certlib
            if _rhsm_path_added:
                sys.path.remove(_rhsm_path)
            return certlib.ConsumerIdentity.existsAndValid()
        except:
            return False

    def _getVbox(self):
        if self._system_is_registered():
            self.start_page = KsRegisteredPage()
            self.skip_registration = True
            return self.start_page.startPageVbox()
        if _haveNetwork():
            self.start_page = rhnregGui.StartPage(firstboot=True)
        else:
            self.start_page = NoNetworkPage()
            self.skip_registration = True
        return self.start_page.startPageVbox()


class KsRegisteredPage:
    def __init__(self):
        gladefile = "/usr/share/rhn/up2date_client/rh_register.glade"
        ksRegisteredXml = gtk.glade.XML(gladefile, "ksRegisteredFirstbootVbox",
                                        domain="rhn-client-tools")
        self.vbox = ksRegisteredXml.get_widget('ksRegisteredFirstbootVbox')

    def startPageVbox(self):
        return self.vbox

    def startPageRegisterNow(self):
        return True


class NoNetworkPage:
    def __init__(self):
        gladefile = "/usr/share/rhn/up2date_client/rh_register.glade"
        noNetworkXml = gtk.glade.XML(gladefile, "noNetworkFirstbootVbox",
                                     domain="rhn-client-tools")
        self.vbox = noNetworkXml.get_widget('noNetworkFirstbootVbox')
        noNetworkXml.signal_autoconnect({
            "on_whyRegisterButton_clicked" : self.why_register_button_clicked,
        })

    def startPageVbox(self):
        return self.vbox

    def startPageRegisterNow(self):
        # Sure, we'll register now. heh heh heh
        # Just continue on past the rhn stuff.
        return True

    def why_register_button_clicked(self, button):
        rhnregGui.WhyRegisterDialog()
gpl-2.0
Qalthos/ansible
lib/ansible/plugins/connection/winrm.py
5
30875
# (c) 2014, Chris Church <chris@ninemoreminutes.com> # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = """ author: Ansible Core Team connection: winrm short_description: Run tasks over Microsoft's WinRM description: - Run commands or put/fetch on a target via WinRM - This plugin allows extra arguments to be passed that are supported by the protocol but not explicitly defined here. They should take the form of variables declared with the following pattern `ansible_winrm_<option>`. version_added: "2.0" requirements: - pywinrm (python library) options: # figure out more elegant 'delegation' remote_addr: description: - Address of the windows machine default: inventory_hostname vars: - name: ansible_host - name: ansible_winrm_host remote_user: keywords: - name: user - name: remote_user description: - The user to log in as to the Windows machine vars: - name: ansible_user - name: ansible_winrm_user port: description: - port for winrm to connect on remote target - The default is the https (5986) port, if using http it should be 5985 vars: - name: ansible_port - name: ansible_winrm_port default: 5986 keywords: - name: port type: integer scheme: description: - URI scheme to use - If not set, then will default to C(https) or C(http) if I(port) is C(5985). choices: [http, https] vars: - name: ansible_winrm_scheme path: description: URI path to connect to default: '/wsman' vars: - name: ansible_winrm_path transport: description: - List of winrm transports to attempt to to use (ssl, plaintext, kerberos, etc) - If None (the default) the plugin will try to automatically guess the correct list - The choices avialable depend on your version of pywinrm type: list vars: - name: ansible_winrm_transport kerberos_command: description: kerberos command to use to request a authentication ticket default: kinit vars: - name: ansible_winrm_kinit_cmd kerberos_mode: description: - kerberos usage mode. - The managed option means Ansible will obtain kerberos ticket. - While the manual one means a ticket must already have been obtained by the user. - If having issues with Ansible freezing when trying to obtain the Kerberos ticket, you can either set this to C(manual) and obtain it outside Ansible or install C(pexpect) through pip and try again. choices: [managed, manual] vars: - name: ansible_winrm_kinit_mode connection_timeout: description: - Sets the operation and read timeout settings for the WinRM connection. - Corresponds to the C(operation_timeout_sec) and C(read_timeout_sec) args in pywinrm so avoid setting these vars with this one. - The default value is whatever is set in the installed version of pywinrm. 
vars: - name: ansible_winrm_connection_timeout """ import base64 import logging import os import re import traceback import json import tempfile import subprocess HAVE_KERBEROS = False try: import kerberos HAVE_KERBEROS = True except ImportError: pass from ansible import constants as C from ansible.errors import AnsibleError, AnsibleConnectionFailure from ansible.errors import AnsibleFileNotFound from ansible.module_utils.json_utils import _filter_non_json_lines from ansible.module_utils.parsing.convert_bool import boolean from ansible.module_utils.six.moves.urllib.parse import urlunsplit from ansible.module_utils._text import to_bytes, to_native, to_text from ansible.module_utils.six import binary_type, PY3 from ansible.plugins.connection import ConnectionBase from ansible.plugins.shell.powershell import _parse_clixml from ansible.utils.hashing import secure_hash from ansible.utils.display import Display # getargspec is deprecated in favour of getfullargspec in Python 3 but # getfullargspec is not available in Python 2 if PY3: from inspect import getfullargspec as getargspec else: from inspect import getargspec try: import winrm from winrm import Response from winrm.protocol import Protocol import requests.exceptions HAS_WINRM = True except ImportError as e: HAS_WINRM = False WINRM_IMPORT_ERR = e try: import xmltodict HAS_XMLTODICT = True except ImportError as e: HAS_XMLTODICT = False XMLTODICT_IMPORT_ERR = e HAS_PEXPECT = False try: import pexpect # echo was added in pexpect 3.3+ which is newer than the RHEL package # we can only use pexpect for kerb auth if echo is a valid kwarg # https://github.com/ansible/ansible/issues/43462 if hasattr(pexpect, 'spawn'): argspec = getargspec(pexpect.spawn.__init__) if 'echo' in argspec.args: HAS_PEXPECT = True except ImportError as e: pass # used to try and parse the hostname and detect if IPv6 is being used try: import ipaddress HAS_IPADDRESS = True except ImportError: HAS_IPADDRESS = False display = Display() class Connection(ConnectionBase): '''WinRM connections over HTTP/HTTPS.''' transport = 'winrm' module_implementation_preferences = ('.ps1', '.exe', '') allow_executable = False has_pipelining = True allow_extras = True def __init__(self, *args, **kwargs): self.always_pipeline_modules = True self.has_native_async = True self.protocol = None self.shell_id = None self.delegate = None self._shell_type = 'powershell' super(Connection, self).__init__(*args, **kwargs) if not C.DEFAULT_DEBUG: logging.getLogger('requests_credssp').setLevel(logging.INFO) logging.getLogger('requests_kerberos').setLevel(logging.INFO) logging.getLogger('urllib3').setLevel(logging.INFO) def _build_winrm_kwargs(self): # this used to be in set_options, as win_reboot needs to be able to # override the conn timeout, we need to be able to build the args # after setting individual options. 
This is called by _connect before # starting the WinRM connection self._winrm_host = self.get_option('remote_addr') self._winrm_user = self.get_option('remote_user') self._winrm_pass = self._play_context.password self._winrm_port = self.get_option('port') self._winrm_scheme = self.get_option('scheme') # old behaviour, scheme should default to http if not set and the port # is 5985 otherwise https if self._winrm_scheme is None: self._winrm_scheme = 'http' if self._winrm_port == 5985 else 'https' self._winrm_path = self.get_option('path') self._kinit_cmd = self.get_option('kerberos_command') self._winrm_transport = self.get_option('transport') self._winrm_connection_timeout = self.get_option('connection_timeout') if hasattr(winrm, 'FEATURE_SUPPORTED_AUTHTYPES'): self._winrm_supported_authtypes = set(winrm.FEATURE_SUPPORTED_AUTHTYPES) else: # for legacy versions of pywinrm, use the values we know are supported self._winrm_supported_authtypes = set(['plaintext', 'ssl', 'kerberos']) # calculate transport if needed if self._winrm_transport is None or self._winrm_transport[0] is None: # TODO: figure out what we want to do with auto-transport selection in the face of NTLM/Kerb/CredSSP/Cert/Basic transport_selector = ['ssl'] if self._winrm_scheme == 'https' else ['plaintext'] if HAVE_KERBEROS and ((self._winrm_user and '@' in self._winrm_user)): self._winrm_transport = ['kerberos'] + transport_selector else: self._winrm_transport = transport_selector unsupported_transports = set(self._winrm_transport).difference(self._winrm_supported_authtypes) if unsupported_transports: raise AnsibleError('The installed version of WinRM does not support transport(s) %s' % to_native(list(unsupported_transports), nonstring='simplerepr')) # if kerberos is among our transports and there's a password specified, we're managing the tickets kinit_mode = self.get_option('kerberos_mode') if kinit_mode is None: # HACK: ideally, remove multi-transport stuff self._kerb_managed = "kerberos" in self._winrm_transport and (self._winrm_pass is not None and self._winrm_pass != "") elif kinit_mode == "managed": self._kerb_managed = True elif kinit_mode == "manual": self._kerb_managed = False # arg names we're going passing directly internal_kwarg_mask = set(['self', 'endpoint', 'transport', 'username', 'password', 'scheme', 'path', 'kinit_mode', 'kinit_cmd']) self._winrm_kwargs = dict(username=self._winrm_user, password=self._winrm_pass) argspec = getargspec(Protocol.__init__) supported_winrm_args = set(argspec.args) supported_winrm_args.update(internal_kwarg_mask) passed_winrm_args = set([v.replace('ansible_winrm_', '') for v in self.get_option('_extras')]) unsupported_args = passed_winrm_args.difference(supported_winrm_args) # warn for kwargs unsupported by the installed version of pywinrm for arg in unsupported_args: display.warning("ansible_winrm_{0} unsupported by pywinrm (is an up-to-date version of pywinrm installed?)".format(arg)) # pass through matching extras, excluding the list we want to treat specially for arg in passed_winrm_args.difference(internal_kwarg_mask).intersection(supported_winrm_args): self._winrm_kwargs[arg] = self.get_option('_extras')['ansible_winrm_%s' % arg] # Until pykerberos has enough goodies to implement a rudimentary kinit/klist, simplest way is to let each connection # auth itself with a private CCACHE. 
def _kerb_auth(self, principal, password): if password is None: password = "" self._kerb_ccache = tempfile.NamedTemporaryFile() display.vvvvv("creating Kerberos CC at %s" % self._kerb_ccache.name) krb5ccname = "FILE:%s" % self._kerb_ccache.name os.environ["KRB5CCNAME"] = krb5ccname krb5env = dict(KRB5CCNAME=krb5ccname) # stores various flags to call with kinit, we currently only use this # to set -f so we can get a forward-able ticket (cred delegation) kinit_flags = [] if boolean(self.get_option('_extras').get('ansible_winrm_kerberos_delegation', False)): kinit_flags.append('-f') kinit_cmdline = [self._kinit_cmd] kinit_cmdline.extend(kinit_flags) kinit_cmdline.append(principal) # pexpect runs the process in its own pty so it can correctly send # the password as input even on MacOS which blocks subprocess from # doing so. Unfortunately it is not available on the built in Python # so we can only use it if someone has installed it if HAS_PEXPECT: proc_mechanism = "pexpect" command = kinit_cmdline.pop(0) password = to_text(password, encoding='utf-8', errors='surrogate_or_strict') display.vvvv("calling kinit with pexpect for principal %s" % principal) try: child = pexpect.spawn(command, kinit_cmdline, timeout=60, env=krb5env, echo=False) except pexpect.ExceptionPexpect as err: err_msg = "Kerberos auth failure when calling kinit cmd " \ "'%s': %s" % (command, to_native(err)) raise AnsibleConnectionFailure(err_msg) try: child.expect(".*:") child.sendline(password) except OSError as err: # child exited before the pass was sent, Ansible will raise # error based on the rc below, just display the error here display.vvvv("kinit with pexpect raised OSError: %s" % to_native(err)) # technically this is the stdout + stderr but to match the # subprocess error checking behaviour, we will call it stderr stderr = child.read() child.wait() rc = child.exitstatus else: proc_mechanism = "subprocess" password = to_bytes(password, encoding='utf-8', errors='surrogate_or_strict') display.vvvv("calling kinit with subprocess for principal %s" % principal) try: p = subprocess.Popen(kinit_cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=krb5env) except OSError as err: err_msg = "Kerberos auth failure when calling kinit cmd " \ "'%s': %s" % (self._kinit_cmd, to_native(err)) raise AnsibleConnectionFailure(err_msg) stdout, stderr = p.communicate(password + b'\n') rc = p.returncode != 0 if rc != 0: # one last attempt at making sure the password does not exist # in the output exp_msg = to_native(stderr.strip()) exp_msg = exp_msg.replace(to_native(password), "<redacted>") err_msg = "Kerberos auth failure for principal %s with %s: %s" \ % (principal, proc_mechanism, exp_msg) raise AnsibleConnectionFailure(err_msg) display.vvvvv("kinit succeeded for principal %s" % principal) def _winrm_connect(self): ''' Establish a WinRM connection over HTTP/HTTPS. 
''' display.vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" % (self._winrm_user, self._winrm_port, self._winrm_host), host=self._winrm_host) winrm_host = self._winrm_host if HAS_IPADDRESS: display.debug("checking if winrm_host %s is an IPv6 address" % winrm_host) try: ipaddress.IPv6Address(winrm_host) except ipaddress.AddressValueError: pass else: winrm_host = "[%s]" % winrm_host netloc = '%s:%d' % (winrm_host, self._winrm_port) endpoint = urlunsplit((self._winrm_scheme, netloc, self._winrm_path, '', '')) errors = [] for transport in self._winrm_transport: if transport == 'kerberos': if not HAVE_KERBEROS: errors.append('kerberos: the python kerberos library is not installed') continue if self._kerb_managed: self._kerb_auth(self._winrm_user, self._winrm_pass) display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._winrm_host) try: winrm_kwargs = self._winrm_kwargs.copy() if self._winrm_connection_timeout: winrm_kwargs['operation_timeout_sec'] = self._winrm_connection_timeout winrm_kwargs['read_timeout_sec'] = self._winrm_connection_timeout + 1 protocol = Protocol(endpoint, transport=transport, **winrm_kwargs) # open the shell from connect so we know we're able to talk to the server if not self.shell_id: self.shell_id = protocol.open_shell(codepage=65001) # UTF-8 display.vvvvv('WINRM OPEN SHELL: %s' % self.shell_id, host=self._winrm_host) return protocol except Exception as e: err_msg = to_text(e).strip() if re.search(to_text(r'Operation\s+?timed\s+?out'), err_msg, re.I): raise AnsibleError('the connection attempt timed out') m = re.search(to_text(r'Code\s+?(\d{3})'), err_msg) if m: code = int(m.groups()[0]) if code == 401: err_msg = 'the specified credentials were rejected by the server' elif code == 411: return protocol errors.append(u'%s: %s' % (transport, err_msg)) display.vvvvv(u'WINRM CONNECTION ERROR: %s\n%s' % (err_msg, to_text(traceback.format_exc())), host=self._winrm_host) if errors: raise AnsibleConnectionFailure(', '.join(map(to_native, errors))) else: raise AnsibleError('No transport found for WinRM connection') def _winrm_send_input(self, protocol, shell_id, command_id, stdin, eof=False): rq = {'env:Envelope': protocol._get_soap_header( resource_uri='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd', action='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Send', shell_id=shell_id)} stream = rq['env:Envelope'].setdefault('env:Body', {}).setdefault('rsp:Send', {})\ .setdefault('rsp:Stream', {}) stream['@Name'] = 'stdin' stream['@CommandId'] = command_id stream['#text'] = base64.b64encode(to_bytes(stdin)) if eof: stream['@End'] = 'true' protocol.send_message(xmltodict.unparse(rq)) def _winrm_exec(self, command, args=(), from_exec=False, stdin_iterator=None): if not self.protocol: self.protocol = self._winrm_connect() self._connected = True if from_exec: display.vvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host) else: display.vvvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host) command_id = None try: stdin_push_failed = False command_id = self.protocol.run_command(self.shell_id, to_bytes(command), map(to_bytes, args), console_mode_stdin=(stdin_iterator is None)) try: if stdin_iterator: for (data, is_last) in stdin_iterator: self._winrm_send_input(self.protocol, self.shell_id, command_id, data, eof=is_last) except Exception as ex: display.warning("ERROR DURING WINRM SEND INPUT - attempting to recover: %s %s" % (type(ex).__name__, to_text(ex))) display.debug(traceback.format_exc()) 
stdin_push_failed = True # NB: this can hang if the receiver is still running (eg, network failed a Send request but the server's still happy). # FUTURE: Consider adding pywinrm status check/abort operations to see if the target is still running after a failure. resptuple = self.protocol.get_command_output(self.shell_id, command_id) # ensure stdout/stderr are text for py3 # FUTURE: this should probably be done internally by pywinrm response = Response(tuple(to_text(v) if isinstance(v, binary_type) else v for v in resptuple)) # TODO: check result from response and set stdin_push_failed if we have nonzero if from_exec: display.vvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host) else: display.vvvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host) display.vvvvvv('WINRM STDOUT %s' % to_text(response.std_out), host=self._winrm_host) display.vvvvvv('WINRM STDERR %s' % to_text(response.std_err), host=self._winrm_host) if stdin_push_failed: # There are cases where the stdin input failed but the WinRM service still processed it. We attempt to # see if stdout contains a valid json return value so we can ignore this error try: filtered_output, dummy = _filter_non_json_lines(response.std_out) json.loads(filtered_output) except ValueError: # stdout does not contain a return response, stdin input was a fatal error stderr = to_bytes(response.std_err, encoding='utf-8') if self.is_clixml(stderr): stderr = self.parse_clixml_stream(stderr) raise AnsibleError('winrm send_input failed; \nstdout: %s\nstderr %s' % (to_native(response.std_out), to_native(stderr))) return response except requests.exceptions.Timeout as exc: raise AnsibleConnectionFailure('winrm connection error: %s' % to_native(exc)) finally: if command_id: self.protocol.cleanup_command(self.shell_id, command_id) def _connect(self): if not HAS_WINRM: raise AnsibleError("winrm or requests is not installed: %s" % to_native(WINRM_IMPORT_ERR)) elif not HAS_XMLTODICT: raise AnsibleError("xmltodict is not installed: %s" % to_native(XMLTODICT_IMPORT_ERR)) super(Connection, self)._connect() if not self.protocol: self._build_winrm_kwargs() # build the kwargs from the options set self.protocol = self._winrm_connect() self._connected = True return self def reset(self): self.protocol = None self.shell_id = None self._connect() def _wrapper_payload_stream(self, payload, buffer_size=200000): payload_bytes = to_bytes(payload) byte_count = len(payload_bytes) for i in range(0, byte_count, buffer_size): yield payload_bytes[i:i + buffer_size], i + buffer_size >= byte_count def exec_command(self, cmd, in_data=None, sudoable=True): super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) cmd_parts = self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False) # TODO: display something meaningful here display.vvv("EXEC (via pipeline wrapper)") stdin_iterator = None if in_data: stdin_iterator = self._wrapper_payload_stream(in_data) result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True, stdin_iterator=stdin_iterator) result.std_out = to_bytes(result.std_out) result.std_err = to_bytes(result.std_err) # parse just stderr from CLIXML output if result.std_err.startswith(b"#< CLIXML"): try: result.std_err = _parse_clixml(result.std_err) except Exception: # unsure if we're guaranteed a valid xml doc- use raw output in case of error pass return (result.status_code, result.std_out, result.std_err) # FUTURE: determine buffer size at runtime via remote winrm config? 
def _put_file_stdin_iterator(self, in_path, out_path, buffer_size=250000): in_size = os.path.getsize(to_bytes(in_path, errors='surrogate_or_strict')) offset = 0 with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file: for out_data in iter((lambda: in_file.read(buffer_size)), b''): offset += len(out_data) self._display.vvvvv('WINRM PUT "%s" to "%s" (offset=%d size=%d)' % (in_path, out_path, offset, len(out_data)), host=self._winrm_host) # yes, we're double-encoding over the wire in this case- we want to ensure that the data shipped to the end PS pipeline is still b64-encoded b64_data = base64.b64encode(out_data) + b'\r\n' # cough up the data, as well as an indicator if this is the last chunk so winrm_send knows to set the End signal yield b64_data, (in_file.tell() == in_size) if offset == 0: # empty file, return an empty buffer + eof to close it yield "", True def put_file(self, in_path, out_path): super(Connection, self).put_file(in_path, out_path) out_path = self._shell._unquote(out_path) display.vvv('PUT "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host) if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')): raise AnsibleFileNotFound('file or module does not exist: "%s"' % to_native(in_path)) script_template = u''' begin {{ $path = '{0}' $DebugPreference = "Continue" $ErrorActionPreference = "Stop" Set-StrictMode -Version 2 $fd = [System.IO.File]::Create($path) $sha1 = [System.Security.Cryptography.SHA1CryptoServiceProvider]::Create() $bytes = @() #initialize for empty file case }} process {{ $bytes = [System.Convert]::FromBase64String($input) $sha1.TransformBlock($bytes, 0, $bytes.Length, $bytes, 0) | Out-Null $fd.Write($bytes, 0, $bytes.Length) }} end {{ $sha1.TransformFinalBlock($bytes, 0, 0) | Out-Null $hash = [System.BitConverter]::ToString($sha1.Hash).Replace("-", "").ToLowerInvariant() $fd.Close() Write-Output "{{""sha1"":""$hash""}}" }} ''' script = script_template.format(self._shell._escape(out_path)) cmd_parts = self._shell._encode_script(script, as_list=True, strict_mode=False, preserve_rc=False) result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], stdin_iterator=self._put_file_stdin_iterator(in_path, out_path)) # TODO: improve error handling if result.status_code != 0: raise AnsibleError(to_native(result.std_err)) put_output = json.loads(result.std_out) remote_sha1 = put_output.get("sha1") if not remote_sha1: raise AnsibleError("Remote sha1 was not returned") local_sha1 = secure_hash(in_path) if not remote_sha1 == local_sha1: raise AnsibleError("Remote sha1 hash {0} does not match local hash {1}".format(to_native(remote_sha1), to_native(local_sha1))) def fetch_file(self, in_path, out_path): super(Connection, self).fetch_file(in_path, out_path) in_path = self._shell._unquote(in_path) out_path = out_path.replace('\\', '/') # consistent with other connection plugins, we assume the caller has created the target dir display.vvv('FETCH "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host) buffer_size = 2**19 # 0.5MB chunks out_file = None try: offset = 0 while True: try: script = ''' $path = "%(path)s" If (Test-Path -Path $path -PathType Leaf) { $buffer_size = %(buffer_size)d $offset = %(offset)d $stream = New-Object -TypeName IO.FileStream($path, [IO.FileMode]::Open, [IO.FileAccess]::Read, [IO.FileShare]::ReadWrite) $stream.Seek($offset, [System.IO.SeekOrigin]::Begin) > $null $buffer = New-Object -TypeName byte[] $buffer_size $bytes_read = $stream.Read($buffer, 0, $buffer_size) if ($bytes_read -gt 0) { $bytes = 
$buffer[0..($bytes_read - 1)] [System.Convert]::ToBase64String($bytes) } $stream.Close() > $null } ElseIf (Test-Path -Path $path -PathType Container) { Write-Host "[DIR]"; } Else { Write-Error "$path does not exist"; Exit 1; } ''' % dict(buffer_size=buffer_size, path=self._shell._escape(in_path), offset=offset) display.vvvvv('WINRM FETCH "%s" to "%s" (offset=%d)' % (in_path, out_path, offset), host=self._winrm_host) cmd_parts = self._shell._encode_script(script, as_list=True, preserve_rc=False) result = self._winrm_exec(cmd_parts[0], cmd_parts[1:]) if result.status_code != 0: raise IOError(to_native(result.std_err)) if result.std_out.strip() == '[DIR]': data = None else: data = base64.b64decode(result.std_out.strip()) if data is None: break else: if not out_file: # If out_path is a directory and we're expecting a file, bail out now. if os.path.isdir(to_bytes(out_path, errors='surrogate_or_strict')): break out_file = open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb') out_file.write(data) if len(data) < buffer_size: break offset += len(data) except Exception: traceback.print_exc() raise AnsibleError('failed to transfer file to "%s"' % to_native(out_path)) finally: if out_file: out_file.close() def close(self): if self.protocol and self.shell_id: display.vvvvv('WINRM CLOSE SHELL: %s' % self.shell_id, host=self._winrm_host) self.protocol.close_shell(self.shell_id) self.shell_id = None self.protocol = None self._connected = False
gpl-3.0
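The `_wrapper_payload_stream` generator above is the heart of how the plugin streams large module payloads: fixed-size slices plus an end-of-stream flag that `_winrm_send_input` turns into the WS-Man `End` attribute. A minimal standalone sketch of that pattern (names here are illustrative, not part of the plugin):

import base64

def chunked_stream(payload, buffer_size=200000):
    """Yield (chunk, is_last) pairs, mirroring _wrapper_payload_stream."""
    payload_bytes = payload if isinstance(payload, bytes) else payload.encode()
    byte_count = len(payload_bytes)
    for i in range(0, byte_count, buffer_size):
        yield payload_bytes[i:i + buffer_size], i + buffer_size >= byte_count

# put_file additionally base64-encodes each chunk before it goes on the wire
for chunk, is_last in chunked_stream(b'x' * 450000):
    print(len(base64.b64encode(chunk)), is_last)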
perimosocordiae/scipy
scipy/interpolate/_pade.py
18
1798
from numpy import zeros, asarray, eye, poly1d, hstack, r_
from scipy import linalg

__all__ = ["pade"]


def pade(an, m, n=None):
    """
    Return Pade approximation to a polynomial as the ratio of two polynomials.

    Parameters
    ----------
    an : (N,) array_like
        Taylor series coefficients.
    m : int
        The order of the returned approximating polynomial `q`.
    n : int, optional
        The order of the returned approximating polynomial `p`. By default,
        the order is ``len(an)-1-m``.

    Returns
    -------
    p, q : Polynomial class
        The Pade approximation of the polynomial defined by `an` is
        ``p(x)/q(x)``.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.interpolate import pade
    >>> e_exp = [1.0, 1.0, 1.0/2.0, 1.0/6.0, 1.0/24.0, 1.0/120.0]
    >>> p, q = pade(e_exp, 2)

    >>> e_exp.reverse()
    >>> e_poly = np.poly1d(e_exp)

    Compare ``e_poly(x)`` and the Pade approximation ``p(x)/q(x)``

    >>> e_poly(1)
    2.7166666666666668

    >>> p(1)/q(1)
    2.7179487179487181

    """
    an = asarray(an)
    if n is None:
        n = len(an) - 1 - m
        if n < 0:
            raise ValueError("Order of q <m> must be smaller than len(an)-1.")
    if n < 0:
        raise ValueError("Order of p <n> must be greater than 0.")
    N = m + n
    if N > len(an)-1:
        raise ValueError("Order of q+p <m+n> must be smaller than len(an).")
    an = an[:N+1]
    Akj = eye(N+1, n+1, dtype=an.dtype)
    Bkj = zeros((N+1, m), dtype=an.dtype)
    for row in range(1, m+1):
        Bkj[row, :row] = -(an[:row])[::-1]
    for row in range(m+1, N+1):
        Bkj[row, :] = -(an[row-m:row])[::-1]
    C = hstack((Akj, Bkj))
    pq = linalg.solve(C, an)
    p = pq[:n+1]
    q = r_[1.0, pq[n+1:]]
    return poly1d(p[::-1]), poly1d(q[::-1])
bsd-3-clause
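A quick check of the routine above, runnable against any SciPy release that ships `scipy.interpolate.pade`: the [3/2] Padé approximant of exp built from six Taylor coefficients is noticeably closer to e at x = 1 than the truncated series itself.

import numpy as np
from scipy.interpolate import pade

e_exp = [1.0, 1.0, 1.0/2.0, 1.0/6.0, 1.0/24.0, 1.0/120.0]
p, q = pade(e_exp, 2)            # q of order 2, p of order 3 by default

taylor = np.poly1d(e_exp[::-1])  # the same series as a plain polynomial
print(taylor(1.0))               # 2.7166666... (truncated Taylor series)
print(p(1.0) / q(1.0))           # 2.7179487... (closer to e = 2.7182818...)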
furf/pledge_service
lib/mailchimp/requests/packages/chardet/chardistribution.py
2755
9226
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE, EUCTW_TYPICAL_DISTRIBUTION_RATIO) from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE, EUCKR_TYPICAL_DISTRIBUTION_RATIO) from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE, GB2312_TYPICAL_DISTRIBUTION_RATIO) from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE, BIG5_TYPICAL_DISTRIBUTION_RATIO) from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE, JIS_TYPICAL_DISTRIBUTION_RATIO) from .compat import wrap_ord ENOUGH_DATA_THRESHOLD = 1024 SURE_YES = 0.99 SURE_NO = 0.01 MINIMUM_DATA_THRESHOLD = 3 class CharDistributionAnalysis: def __init__(self): # Mapping table to get frequency order from char order (get from # GetOrder()) self._mCharToFreqOrder = None self._mTableSize = None # Size of above table # This is a constant value which varies from language to language, # used in calculating confidence. See # http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html # for further detail. self._mTypicalDistributionRatio = None self.reset() def reset(self): """reset analyser, clear any state""" # If this flag is set to True, detection is done and conclusion has # been made self._mDone = False self._mTotalChars = 0 # Total characters encountered # The number of characters whose frequency order is less than 512 self._mFreqChars = 0 def feed(self, aBuf, aCharLen): """feed a character with known length""" if aCharLen == 2: # we only care about 2-bytes character in our distribution analysis order = self.get_order(aBuf) else: order = -1 if order >= 0: self._mTotalChars += 1 # order is valid if order < self._mTableSize: if 512 > self._mCharToFreqOrder[order]: self._mFreqChars += 1 def get_confidence(self): """return confidence based on existing data""" # if we didn't receive any character in our consideration range, # return negative answer if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD: return SURE_NO if self._mTotalChars != self._mFreqChars: r = (self._mFreqChars / ((self._mTotalChars - self._mFreqChars) * self._mTypicalDistributionRatio)) if r < SURE_YES: return r # normalize confidence (we don't want to be 100% sure) return SURE_YES def got_enough_data(self): # It is not necessary to receive all data to draw conclusion. 
# For charset detection, certain amount of data is enough return self._mTotalChars > ENOUGH_DATA_THRESHOLD def get_order(self, aBuf): # We do not handle characters based on the original encoding string, # but convert this encoding string to a number, here called order. # This allows multiple encodings of a language to share one frequency # table. return -1 class EUCTWDistributionAnalysis(CharDistributionAnalysis): def __init__(self): CharDistributionAnalysis.__init__(self) self._mCharToFreqOrder = EUCTWCharToFreqOrder self._mTableSize = EUCTW_TABLE_SIZE self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO def get_order(self, aBuf): # for euc-TW encoding, we are interested # first byte range: 0xc4 -- 0xfe # second byte range: 0xa1 -- 0xfe # no validation needed here. State machine has done that first_char = wrap_ord(aBuf[0]) if first_char >= 0xC4: return 94 * (first_char - 0xC4) + wrap_ord(aBuf[1]) - 0xA1 else: return -1 class EUCKRDistributionAnalysis(CharDistributionAnalysis): def __init__(self): CharDistributionAnalysis.__init__(self) self._mCharToFreqOrder = EUCKRCharToFreqOrder self._mTableSize = EUCKR_TABLE_SIZE self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO def get_order(self, aBuf): # for euc-KR encoding, we are interested # first byte range: 0xb0 -- 0xfe # second byte range: 0xa1 -- 0xfe # no validation needed here. State machine has done that first_char = wrap_ord(aBuf[0]) if first_char >= 0xB0: return 94 * (first_char - 0xB0) + wrap_ord(aBuf[1]) - 0xA1 else: return -1 class GB2312DistributionAnalysis(CharDistributionAnalysis): def __init__(self): CharDistributionAnalysis.__init__(self) self._mCharToFreqOrder = GB2312CharToFreqOrder self._mTableSize = GB2312_TABLE_SIZE self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO def get_order(self, aBuf): # for GB2312 encoding, we are interested # first byte range: 0xb0 -- 0xfe # second byte range: 0xa1 -- 0xfe # no validation needed here. State machine has done that first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1]) if (first_char >= 0xB0) and (second_char >= 0xA1): return 94 * (first_char - 0xB0) + second_char - 0xA1 else: return -1 class Big5DistributionAnalysis(CharDistributionAnalysis): def __init__(self): CharDistributionAnalysis.__init__(self) self._mCharToFreqOrder = Big5CharToFreqOrder self._mTableSize = BIG5_TABLE_SIZE self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO def get_order(self, aBuf): # for big5 encoding, we are interested # first byte range: 0xa4 -- 0xfe # second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe # no validation needed here. State machine has done that first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1]) if first_char >= 0xA4: if second_char >= 0xA1: return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63 else: return 157 * (first_char - 0xA4) + second_char - 0x40 else: return -1 class SJISDistributionAnalysis(CharDistributionAnalysis): def __init__(self): CharDistributionAnalysis.__init__(self) self._mCharToFreqOrder = JISCharToFreqOrder self._mTableSize = JIS_TABLE_SIZE self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO def get_order(self, aBuf): # for sjis encoding, we are interested # first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe # second byte range: 0x40 -- 0x7e, 0x81 -- oxfe # no validation needed here. 
State machine has done that first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1]) if (first_char >= 0x81) and (first_char <= 0x9F): order = 188 * (first_char - 0x81) elif (first_char >= 0xE0) and (first_char <= 0xEF): order = 188 * (first_char - 0xE0 + 31) else: return -1 order = order + second_char - 0x40 if second_char > 0x7F: order = -1 return order class EUCJPDistributionAnalysis(CharDistributionAnalysis): def __init__(self): CharDistributionAnalysis.__init__(self) self._mCharToFreqOrder = JISCharToFreqOrder self._mTableSize = JIS_TABLE_SIZE self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO def get_order(self, aBuf): # for euc-JP encoding, we are interested # first byte range: 0xa0 -- 0xfe # second byte range: 0xa1 -- 0xfe # no validation needed here. State machine has done that char = wrap_ord(aBuf[0]) if char >= 0xA0: return 94 * (char - 0xA1) + wrap_ord(aBuf[1]) - 0xa1 else: return -1
apache-2.0
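All of the `get_order` implementations above are variations on one idea: collapse a valid two-byte sequence into a dense index so a flat frequency table can rank it. Restating the EUC-KR case as a standalone function (illustrative only; in the detector the state machine has already validated the bytes):

def euckr_order(first_byte, second_byte):
    """Map an EUC-KR two-byte sequence onto the linear index used to
    look up its frequency rank: lead bytes start at 0xB0 and trail
    bytes run 0xA1..0xFE, i.e. 94 slots per row."""
    if first_byte >= 0xB0 and 0xA1 <= second_byte <= 0xFE:
        return 94 * (first_byte - 0xB0) + second_byte - 0xA1
    return -1

# first valid code point maps to slot 0, the next trail byte to slot 1, ...
assert euckr_order(0xB0, 0xA1) == 0
assert euckr_order(0xB0, 0xA2) == 1
assert euckr_order(0xB1, 0xA1) == 94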
RelentlessCoffee/tea2
product.py
1
1495
import requests
import bs4
import json


class NoMaxQuantity(Exception):
    pass


def load_single_page(url):
    """Fetch a product page and return it as parsed HTML."""
    response = requests.get(url)
    soup = bs4.BeautifulSoup(response.text, 'html.parser')
    return soup


def _find_options_from_form(soup):
    """Read per-variation stock counts from a WooCommerce-style variations form."""
    form = soup.find("form", class_="variations_form cart")
    if form is None:
        raise NoMaxQuantity("no max quantity")
    product_data = json.loads(form['data-product_variations'])
    quantities = [option["max_qty"] for option in product_data]
    # skip the leading "Choose an option" placeholder <option>
    options = [option["value"] for option in soup.find_all("option")[1:]]
    option_dicts = []
    for option, quantity in zip(options, quantities):
        option_dicts.append({
            "name": option,
            "quantity": quantity
        })
    return option_dicts


def _find_options_from_input(soup):
    """Fall back to the quantity <input>'s max attribute for simple products."""
    product_data = soup.find("input", class_="input-text qty text")
    if product_data is None:
        raise NoMaxQuantity("no max quantity")
    quantity = product_data['max']
    if quantity == "":
        raise NoMaxQuantity("no max quantity")
    return [{
        "name": find_name(soup),
        "quantity": int(quantity)
    }]


def find_options(soup):
    try:
        return _find_options_from_form(soup)
    except NoMaxQuantity:
        return _find_options_from_input(soup)


def find_name(soup):
    title = soup.find("h1")
    return title.string
mit
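A small way to exercise the fallback path without hitting a live shop: feed `bs4` a hand-written snippet shaped like the markup the scraper expects. The HTML here is made up for the demo.

import bs4

html = '''
<h1>Sample Tea</h1>
<input class="input-text qty text" max="12">
'''
soup = bs4.BeautifulSoup(html, 'html.parser')

# the same lookup _find_options_from_input performs
qty_input = soup.find("input", class_="input-text qty text")
print(qty_input['max'])          # '12'
print(soup.find("h1").string)    # 'Sample Tea' (what find_name returns)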
onitake/ansible
lib/ansible/modules/system/cronvar.py
73
13521
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Cronvar Plugin: The goal of this plugin is to provide an idempotent # method for set cron variable values. It should play well with the # existing cron module as well as allow for manually added variables. # Each variable entered will be preceded with a comment describing the # variable so that it can be found later. This is required to be # present in order for this plugin to find/modify the variable # This module is based on the crontab module. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = """ --- module: cronvar short_description: Manage variables in crontabs description: - Use this module to manage crontab variables. This module allows you to create, update, or delete cron variable definitions. version_added: "2.0" options: name: description: - Name of the crontab variable. required: yes value: description: - The value to set this variable to. - Required if C(state=present). insertafter: description: - If specified, the variable will be inserted after the variable specified. - Used with C(state=present). insertbefore: description: - Used with C(state=present). If specified, the variable will be inserted just before the variable specified. state: description: - Whether to ensure that the variable is present or absent. choices: [ absent, present ] default: present user: description: - The specific user whose crontab should be modified. default: root cron_file: description: - If specified, uses this file instead of an individual user's crontab. Without a leading /, this is assumed to be in /etc/cron.d. With a leading /, this is taken as absolute. backup: description: - If set, create a backup of the crontab before it is modified. The location of the backup is returned in the C(backup) variable by this module. type: bool default: 'no' requirements: - cron author: - Doug Luce (@dougluce) """ EXAMPLES = ''' - name: Ensure entry like "EMAIL=doug@ansibmod.con.com" exists cronvar: name: EMAIL value: doug@ansibmod.con.com - name: Ensure a variable does not exist. This may remove any variable named "LEGACY" cronvar: name: LEGACY state: absent - name: Add a variable to a file under /etc/cron.d cronvar: name: LOGFILE value: /var/log/yum-autoupdate.log user: root cron_file: ansible_yum-autoupdate ''' import os import pipes import platform import pwd import re import shlex import sys import tempfile from ansible.module_utils.basic import AnsibleModule CRONCMD = "/usr/bin/crontab" class CronVarError(Exception): pass class CronVar(object): """ CronVar object to write variables to crontabs. 
user - the user of the crontab (defaults to root) cron_file - a cron file under /etc/cron.d """ def __init__(self, module, user=None, cron_file=None): self.module = module self.user = user self.lines = None self.wordchars = ''.join(chr(x) for x in range(128) if chr(x) not in ('=', "'", '"',)) if cron_file: self.cron_file = "" if os.path.isabs(cron_file): self.cron_file = cron_file else: self.cron_file = os.path.join('/etc/cron.d', cron_file) else: self.cron_file = None self.read() def read(self): # Read in the crontab from the system self.lines = [] if self.cron_file: # read the cronfile try: f = open(self.cron_file, 'r') self.lines = f.read().splitlines() f.close() except IOError: # cron file does not exist return except: raise CronVarError("Unexpected error:", sys.exc_info()[0]) else: # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True) if rc != 0 and rc != 1: # 1 can mean that there are no jobs. raise CronVarError("Unable to read crontab") lines = out.splitlines() count = 0 for l in lines: if count > 2 or (not re.match(r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l ) and not re.match(r'# \(/tmp/.*installed on.*\)', l) and not re.match(r'# \(.*version.*\)', l)): self.lines.append(l) count += 1 def log_message(self, message): self.module.debug('ansible: "%s"' % message) def write(self, backup_file=None): """ Write the crontab to the system. Saves all information. """ if backup_file: fileh = open(backup_file, 'w') elif self.cron_file: fileh = open(self.cron_file, 'w') else: filed, path = tempfile.mkstemp(prefix='crontab') fileh = os.fdopen(filed, 'w') fileh.write(self.render()) fileh.close() # return if making a backup if backup_file: return # Add the entire crontab back to the user crontab if not self.cron_file: # quoting shell args for now but really this should be two non-shell calls. FIXME (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True) os.unlink(path) if rc != 0: self.module.fail_json(msg=err) def remove_variable_file(self): try: os.unlink(self.cron_file) return True except OSError: # cron file does not exist return False except: raise CronVarError("Unexpected error:", sys.exc_info()[0]) def parse_for_var(self, line): lexer = shlex.shlex(line) lexer.wordchars = self.wordchars varname = lexer.get_token() is_env_var = lexer.get_token() == '=' value = ''.join(lexer) if is_env_var: return (varname, value) raise CronVarError("Not a variable.") def find_variable(self, name): for l in self.lines: try: (varname, value) = self.parse_for_var(l) if varname == name: return value except CronVarError: pass return None def get_var_names(self): var_names = [] for l in self.lines: try: (var_name, _) = self.parse_for_var(l) var_names.append(var_name) except CronVarError: pass return var_names def add_variable(self, name, value, insertbefore, insertafter): if insertbefore is None and insertafter is None: # Add the variable to the top of the file. self.lines.insert(0, "%s=%s" % (name, value)) else: newlines = [] for l in self.lines: try: (varname, _) = self.parse_for_var(l) # Throws if not a var line if varname == insertbefore: newlines.append("%s=%s" % (name, value)) newlines.append(l) elif varname == insertafter: newlines.append(l) newlines.append("%s=%s" % (name, value)) else: raise CronVarError # Append. 
except CronVarError: newlines.append(l) self.lines = newlines def remove_variable(self, name): self.update_variable(name, None, remove=True) def update_variable(self, name, value, remove=False): newlines = [] for l in self.lines: try: (varname, _) = self.parse_for_var(l) # Throws if not a var line if varname != name: raise CronVarError # Append. if not remove: newlines.append("%s=%s" % (name, value)) except CronVarError: newlines.append(l) self.lines = newlines def render(self): """ Render a proper crontab """ result = '\n'.join(self.lines) if result and result[-1] not in ['\n', '\r']: result += '\n' return result def _read_user_execute(self): """ Returns the command line for reading a crontab """ user = '' if self.user: if platform.system() == 'SunOS': return "su %s -c '%s -l'" % (pipes.quote(self.user), pipes.quote(CRONCMD)) elif platform.system() == 'AIX': return "%s -l %s" % (pipes.quote(CRONCMD), pipes.quote(self.user)) elif platform.system() == 'HP-UX': return "%s %s %s" % (CRONCMD, '-l', pipes.quote(self.user)) elif pwd.getpwuid(os.getuid())[0] != self.user: user = '-u %s' % pipes.quote(self.user) return "%s %s %s" % (CRONCMD, user, '-l') def _write_execute(self, path): """ Return the command line for writing a crontab """ user = '' if self.user: if platform.system() in ['SunOS', 'HP-UX', 'AIX']: return "chown %s %s ; su '%s' -c '%s %s'" % (pipes.quote(self.user), pipes.quote(path), pipes.quote(self.user), CRONCMD, pipes.quote(path)) elif pwd.getpwuid(os.getuid())[0] != self.user: user = '-u %s' % pipes.quote(self.user) return "%s %s %s" % (CRONCMD, user, pipes.quote(path)) # ================================================== def main(): # The following example playbooks: # # - cronvar: name="SHELL" value="/bin/bash" # # - name: Set the email # cronvar: name="EMAILTO" value="doug@ansibmod.con.com" # # - name: Get rid of the old new host variable # cronvar: name="NEW_HOST" state=absent # # Would produce: # SHELL = /bin/bash # EMAILTO = doug@ansibmod.con.com module = AnsibleModule( argument_spec=dict( name=dict(type='str', required=True), value=dict(type='str'), user=dict(type='str'), cron_file=dict(type='str'), insertafter=dict(type='str'), insertbefore=dict(type='str'), state=dict(type='str', default='present', choices=['absent', 'present']), backup=dict(type='bool', default=False), ), mutually_exclusive=[['insertbefore', 'insertafter']], supports_check_mode=False, ) name = module.params['name'] value = module.params['value'] user = module.params['user'] cron_file = module.params['cron_file'] insertafter = module.params['insertafter'] insertbefore = module.params['insertbefore'] state = module.params['state'] backup = module.params['backup'] ensure_present = state == 'present' changed = False res_args = dict() # Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option. 
    os.umask(int('022', 8))
    cronvar = CronVar(module, user, cron_file)

    module.debug('cronvar instantiated - name: "%s"' % name)

    # --- user input validation ---
    if name is None and ensure_present:
        module.fail_json(msg="You must specify 'name' to insert a new cron variable")
    if value is None and ensure_present:
        module.fail_json(msg="You must specify 'value' to insert a new cron variable")
    if name is None and not ensure_present:
        module.fail_json(msg="You must specify 'name' to remove a cron variable")

    # if requested make a backup before making a change
    if backup:
        (_, backup_file) = tempfile.mkstemp(prefix='cronvar')
        cronvar.write(backup_file)

    if cronvar.cron_file and not name and not ensure_present:
        changed = cronvar.remove_variable_file()
        module.exit_json(changed=changed, cron_file=cron_file, state=state)

    old_value = cronvar.find_variable(name)

    if ensure_present:
        if old_value is None:
            cronvar.add_variable(name, value, insertbefore, insertafter)
            changed = True
        elif old_value != value:
            cronvar.update_variable(name, value)
            changed = True
    else:
        if old_value is not None:
            cronvar.remove_variable(name)
            changed = True

    res_args = {
        "vars": cronvar.get_var_names(),
        "changed": changed
    }

    if changed:
        cronvar.write()

    # retain the backup only if crontab or cron file have changed
    if backup:
        if changed:
            res_args['backup_file'] = backup_file
        else:
            os.unlink(backup_file)

    if cron_file:
        res_args['cron_file'] = cron_file

    module.exit_json(**res_args)


if __name__ == '__main__':
    main()
gpl-3.0
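The `parse_for_var` method above leans on `shlex` with a custom `wordchars` table so that everything except `=` and quotes can appear in a name or value. A standalone sketch of that parsing trick (the function name restates the module's method for demo purposes):

import shlex

def parse_for_var(line):
    """Treat the text up to '=' as the variable name, the rest as the value,
    mirroring CronVar.parse_for_var."""
    lexer = shlex.shlex(line)
    # let everything except '=' and quote characters be part of a word,
    # as the module does via its custom wordchars table
    lexer.wordchars = ''.join(chr(x) for x in range(128)
                              if chr(x) not in ('=', "'", '"'))
    varname = lexer.get_token()
    if lexer.get_token() != '=':
        raise ValueError("Not a variable.")
    return varname, ''.join(lexer)

print(parse_for_var('MAILTO=admin@example.com'))
# -> ('MAILTO', 'admin@example.com')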
DavidAndreev/indico
indico/modules/events/timetable/blueprint.py
1
6179
# This file is part of Indico. # Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN). # # Indico is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 3 of the # License, or (at your option) any later version. # # Indico is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Indico; if not, see <http://www.gnu.org/licenses/>. from __future__ import unicode_literals from indico.modules.events.timetable.controllers.display import (RHTimetable, RHTimetableEntryInfo, RHTimetableExportPDF, RHTimetableExportDefaultPDF) from indico.modules.events.timetable.controllers.legacy import (RHLegacyTimetableAddContribution, RHLegacyTimetableAddBreak, RHLegacyTimetableAddSession, RHLegacyTimetableAddSessionBlock, RHLegacyTimetableDeleteEntry, RHLegacyTimetableGetUnscheduledContributions, RHLegacyTimetableScheduleContribution, RHLegacyTimetableReschedule, RHLegacyTimetableFitBlock, RHLegacyTimetableEditEntry, RHLegacyTimetableEditEntryTime, RHLegacyTimetableMoveEntry, RHLegacyTimetableEditEntryDateTime, RHLegacyTimetableShiftEntries, RHLegacyTimetableSwapEntries, RHLegacyTimetableEditSession, RHLegacyTimetableBreakREST) from indico.modules.events.timetable.controllers.manage import (RHManageTimetable, RHManageSessionTimetable, RHTimetableREST, RHManageTimetableEntryInfo) from indico.web.flask.wrappers import IndicoBlueprint _bp = IndicoBlueprint('timetable', __name__, template_folder='templates', virtual_template_folder='events/timetable', url_prefix='/event/<confId>') # Management _bp.add_url_rule('/manage/timetable/', 'management', RHManageTimetable) _bp.add_url_rule('/manage/timetable/', 'timetable_rest', RHTimetableREST, methods=('POST',)) _bp.add_url_rule('/manage/timetable/<int:entry_id>', 'timetable_rest', RHTimetableREST, methods=('PATCH', 'DELETE')) _bp.add_url_rule('/manage/timetable/session/<int:session_id>/', 'manage_session', RHManageSessionTimetable) # Timetable legacy operations _bp.add_url_rule('/manage/timetable/add-session', 'add_session', RHLegacyTimetableAddSession, methods=('GET', 'POST')) _bp.add_url_rule('/manage/timetable/break/<int:break_id>', 'legacy_break_rest', RHLegacyTimetableBreakREST, methods=('PATCH',)) with _bp.add_prefixed_rules('/manage/timetable/session/<int:session_id>', '/manage/timetable'): _bp.add_url_rule('/', 'session_rest', RHLegacyTimetableEditSession, methods=('PATCH',)) _bp.add_url_rule('/entry/<int:entry_id>/info', 'entry_info_manage', RHManageTimetableEntryInfo) _bp.add_url_rule('/entry/<int:entry_id>/delete', 'delete_entry', RHLegacyTimetableDeleteEntry, methods=('POST',)) _bp.add_url_rule('/entry/<int:entry_id>/move', 'move_entry', RHLegacyTimetableMoveEntry, methods=('GET', 'POST')) _bp.add_url_rule('/entry/<int:entry_id>/shift', 'shift_entries', RHLegacyTimetableShiftEntries, methods=('POST',)) _bp.add_url_rule('/entry/<int:entry_id>/swap', 'swap_entries', RHLegacyTimetableSwapEntries, methods=('POST',)) _bp.add_url_rule('/entry/<int:entry_id>/edit/', 'edit_entry', RHLegacyTimetableEditEntry, methods=('GET', 'POST')) _bp.add_url_rule('/entry/<int:entry_id>/edit/time', 'edit_entry_time', RHLegacyTimetableEditEntryTime, methods=('GET', 'POST')) 
_bp.add_url_rule('/entry/<int:entry_id>/edit/datetime', 'edit_entry_datetime', RHLegacyTimetableEditEntryDateTime, methods=('POST',)) _bp.add_url_rule('/block/<block_id>/schedule', 'schedule', RHLegacyTimetableScheduleContribution, methods=('POST',)) _bp.add_url_rule('/block/<block_id>/fit', 'fit_session_block', RHLegacyTimetableFitBlock, methods=('POST',)) _bp.add_url_rule('/not-scheduled', 'not_scheduled', RHLegacyTimetableGetUnscheduledContributions) _bp.add_url_rule('/schedule', 'schedule', RHLegacyTimetableScheduleContribution, methods=('POST',)) _bp.add_url_rule('/reschedule', 'reschedule', RHLegacyTimetableReschedule, methods=('POST',)) _bp.add_url_rule('/add-break', 'add_break', RHLegacyTimetableAddBreak, methods=('GET', 'POST')) _bp.add_url_rule('/add-contribution', 'add_contribution', RHLegacyTimetableAddContribution, methods=('GET', 'POST')) _bp.add_url_rule('/add-session-block', 'add_session_block', RHLegacyTimetableAddSessionBlock, methods=('GET', 'POST')) # Display _bp.add_url_rule('/timetable/', 'timetable', RHTimetable) _bp.add_url_rule('/timetable/pdf', 'export_pdf', RHTimetableExportPDF, methods=('GET', 'POST')) _bp.add_url_rule('/timetable/timetable.pdf', 'export_default_pdf', RHTimetableExportDefaultPDF) _bp.add_url_rule('/timetable/entry/<int:entry_id>/info', 'entry_info', RHTimetableEntryInfo)
gpl-3.0
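For readers less familiar with Flask's routing, the registrations above boil down to plain `Blueprint.add_url_rule` calls behind a parameterised URL prefix; Indico's `IndicoBlueprint` and `add_prefixed_rules` are wrappers over exactly this. A stripped-down plain-Flask sketch (demo names, not Indico code):

from flask import Flask, Blueprint, url_for

bp = Blueprint('timetable_demo', __name__, url_prefix='/event/<conf_id>')

def manage_timetable(conf_id):
    return 'timetable for event %s' % conf_id

# the plain-Flask equivalent of one _bp.add_url_rule(...) call above
bp.add_url_rule('/manage/timetable/', 'management', manage_timetable)

app = Flask(__name__)
app.register_blueprint(bp)

with app.test_request_context():
    print(url_for('timetable_demo.management', conf_id=42))
    # -> /event/42/manage/timetable/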
cogniteev/flask-oauthlib
tests/oauth1/server.py
16
7152
# coding: utf-8 from flask import g, render_template, request, jsonify from flask_sqlalchemy import SQLAlchemy from flask_oauthlib.provider import OAuth1Provider db = SQLAlchemy() def enable_log(name='flask_oauthlib'): import logging logger = logging.getLogger(name) logger.addHandler(logging.StreamHandler()) logger.setLevel(logging.DEBUG) # enable_log() class User(db.Model): id = db.Column(db.Integer, primary_key=True) username = db.Column(db.String(40), unique=True, index=True, nullable=False) class Client(db.Model): # id = db.Column(db.Integer, primary_key=True) # human readable name client_key = db.Column(db.String(40), primary_key=True) client_secret = db.Column(db.String(55), unique=True, index=True, nullable=False) rsa_key = db.Column(db.String(55)) _realms = db.Column(db.Text) _redirect_uris = db.Column(db.Text) @property def user(self): return User.query.get(1) @property def redirect_uris(self): if self._redirect_uris: return self._redirect_uris.split() return [] @property def default_redirect_uri(self): return self.redirect_uris[0] @property def default_realms(self): if self._realms: return self._realms.split() return [] class Grant(db.Model): id = db.Column(db.Integer, primary_key=True) user_id = db.Column( db.Integer, db.ForeignKey('user.id', ondelete='CASCADE') ) user = db.relationship('User') client_key = db.Column( db.String(40), db.ForeignKey('client.client_key'), nullable=False, ) client = db.relationship('Client') token = db.Column(db.String(255), index=True, unique=True) secret = db.Column(db.String(255), nullable=False) verifier = db.Column(db.String(255)) expires = db.Column(db.DateTime) redirect_uri = db.Column(db.Text) _realms = db.Column(db.Text) def delete(self): db.session.delete(self) db.session.commit() return self @property def realms(self): if self._realms: return self._realms.split() return [] class Token(db.Model): id = db.Column(db.Integer, primary_key=True) client_key = db.Column( db.String(40), db.ForeignKey('client.client_key'), nullable=False, ) client = db.relationship('Client') user_id = db.Column( db.Integer, db.ForeignKey('user.id'), ) user = db.relationship('User') token = db.Column(db.String(255)) secret = db.Column(db.String(255)) _realms = db.Column(db.Text) @property def realms(self): if self._realms: return self._realms.split() return [] def prepare_app(app): db.init_app(app) db.app = app db.create_all() client1 = Client( client_key='dev', client_secret='dev', _redirect_uris=( 'http://localhost:8000/authorized ' 'http://localhost/authorized' ), _realms='email', ) user = User(username='admin') try: db.session.add(client1) db.session.add(user) db.session.commit() except: db.session.rollback() return app def create_server(app): app = prepare_app(app) oauth = OAuth1Provider(app) @oauth.clientgetter def get_client(client_key): return Client.query.filter_by(client_key=client_key).first() @oauth.tokengetter def load_access_token(client_key, token, *args, **kwargs): t = Token.query.filter_by(client_key=client_key, token=token).first() return t @oauth.tokensetter def save_access_token(token, req): tok = Token( client_key=req.client.client_key, user_id=req.user.id, token=token['oauth_token'], secret=token['oauth_token_secret'], _realms=token['oauth_authorized_realms'], ) db.session.add(tok) db.session.commit() @oauth.grantgetter def load_request_token(token): grant = Grant.query.filter_by(token=token).first() return grant @oauth.grantsetter def save_request_token(token, oauth): if oauth.realms: realms = ' '.join(oauth.realms) else: realms = None grant = 
Grant( token=token['oauth_token'], secret=token['oauth_token_secret'], client_key=oauth.client.client_key, redirect_uri=oauth.redirect_uri, _realms=realms, ) db.session.add(grant) db.session.commit() return grant @oauth.verifiergetter def load_verifier(verifier, token): return Grant.query.filter_by(verifier=verifier, token=token).first() @oauth.verifiersetter def save_verifier(token, verifier, *args, **kwargs): tok = Grant.query.filter_by(token=token).first() tok.verifier = verifier['oauth_verifier'] tok.user_id = g.user.id db.session.add(tok) db.session.commit() return tok @oauth.noncegetter def load_nonce(*args, **kwargs): return None @oauth.noncesetter def save_nonce(*args, **kwargs): return None @app.before_request def load_current_user(): user = User.query.get(1) g.user = user @app.route('/home') def home(): return render_template('home.html') @app.route('/oauth/authorize', methods=['GET', 'POST']) @oauth.authorize_handler def authorize(*args, **kwargs): # NOTICE: for real project, you need to require login if request.method == 'GET': # render a page for user to confirm the authorization return render_template('confirm.html') confirm = request.form.get('confirm', 'no') return confirm == 'yes' @app.route('/oauth/request_token') @oauth.request_token_handler def request_token(): return {} @app.route('/oauth/access_token') @oauth.access_token_handler def access_token(): return {} @app.route('/api/email') @oauth.require_oauth('email') def email_api(): oauth = request.oauth return jsonify(email='me@oauth.net', username=oauth.user.username) @app.route('/api/address/<city>') @oauth.require_oauth('address') def address_api(city): oauth = request.oauth return jsonify(address=city, username=oauth.user.username) @app.route('/api/method', methods=['GET', 'POST', 'PUT', 'DELETE']) @oauth.require_oauth() def method_api(): return jsonify(method=request.method) return app if __name__ == '__main__': from flask import Flask app = Flask(__name__) app.debug = True app.secret_key = 'development' app.config.update({ 'SQLALCHEMY_DATABASE_URI': 'sqlite:///oauth1.sqlite', 'OAUTH1_PROVIDER_ENFORCE_SSL': False, 'OAUTH1_PROVIDER_KEY_LENGTH': (3, 30), 'OAUTH1_PROVIDER_REALMS': ['email', 'address'] }) app = create_server(app) app.run()
bsd-3-clause
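One design choice worth calling out in the models above: `Client`, `Grant`, and `Token` all persist realms (and redirect URIs) as a single space-separated text column and expose a list through a property, which keeps the schema trivial at the cost of forbidding spaces inside a realm. The pattern in isolation:

class SpaceDelimited(object):
    """Same storage trick the Grant/Token models use: keep one text
    column, expose it as a list."""
    def __init__(self, realms_text=None):
        self._realms = realms_text

    @property
    def realms(self):
        if self._realms:
            return self._realms.split()
        return []

print(SpaceDelimited('email address').realms)  # ['email', 'address']
print(SpaceDelimited().realms)                 # []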
marineam/nagcat
python/nagcat/query.py
1
9568
# Copyright 2008-2009 ITA Software, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Query and friends. All requests are defined as a Query class which is a Runnable. """ import errno from twisted.internet import defer, reactor from twisted.internet import error as neterror try: from OpenSSL import SSL, crypto from twisted.internet import ssl except ImportError: SSL = None from nagcat import errors, filters, log, plugin, runnable, util class QueryManager(object): def __init__(self, nagcat): self._nagcat = nagcat self._queries = {} def new_query(self, conf, qcls=None): """Create a new query and register it or return an existing one""" # Find the correct Query class for this type if not qcls: qtype = conf.get('type') qcls = plugin.search(IQuery, qtype, None) if not qcls: raise errors.ConfigError(conf, "Unknown query type '%s'" % qtype) qobj = qcls(self._nagcat, conf) key = str(qobj) if key in self._queries: log.debug("Reusing query '%s'", key) qobj = self._queries[key] qobj.update(conf) else: log.debug("Adding query '%s'", key) self._queries[key] = qobj return qobj class IQuery(plugin.INagcatPlugin): """Interface for finding Query plugin classes""" class Query(runnable.Runnable): """Query objects make a single request or run a single process as defined in its configuration. The only state they may contain when it is not running is the results from the last run (be it real data or a Failure object). All state that defines a query *MUST* be saved on self.conf and never changed after __init__ to allow identical queries to be identified reliably. Query objects are only used by SimpleTest objects. """ type = "Query" def __init__(self, nagcat, conf): super(Query, self).__init__(conf) # self.conf must contain all configuration variables that # this object uses so identical Queries can be identified. self.conf = {} # Used by the save filter and by queries to report any # extra pieces of metadata such as Request ID/URL. 
self.saved = {} # Semi-fatal init errors, forces query to UNKNOWN self.init_errors = [] # All queries should handle timeouts try: interval = util.Interval( conf.get('timeout', nagcat.default_timeout)) self.conf['timeout'] = interval.seconds except util.IntervalError, ex: raise errors.ConfigError(conf, "Invalid timeout: %s" % ex) if self.conf['timeout'] <= 0: raise errors.ConfigError(conf, "Invalid timeout value '%s'" % conf.get('timeout')) def _start_self(self): self.saved.clear() if self.init_errors: msg = '\n'.join(self.init_errors) return defer.fail(errors.Failure(errors.TestUnknown(msg))) else: return super(Query, self)._start_self() @errors.callback def _failure_tcp(self, result): """Catch common TCP failures and convert them to a TestError""" if isinstance(result.value, neterror.TimeoutError): raise errors.TestCritical("TCP Error: handshake timeout") elif isinstance(result.value, neterror.ConnectionRefusedError): raise errors.TestCritical("TCP Error: connection refused") elif isinstance(result.value, neterror.ConnectionLost): raise errors.TestCritical("TCP Error: connection lost " \ "unexpectedly") elif isinstance(result.value, neterror.ConnectError): if result.value.osError == errno.EMFILE: log.error("Too many open files! Restart with a new ulimit -n") raise errors.TestAbort("NAGCAT ERROR: %s" % result.value) raise errors.TestCritical("TCP Error: %s" % result.value) return result def _connect(self, factory): # Split out the reactor.connect call to allow for easy # overriding in SSLMixin for adding SSL support. reactor.connectTCP(self.addr, self.conf['port'], factory, self.conf['timeout']) def __str__(self): return "<%s %r>" % (self.__class__.__name__, self.conf) def update(self, conf): """Update a reused Query object. When a query object is reused for a new query it will be given the new query's config via this method. In most cases all we need to do is select the lower of the two repeat values. """ try: repeat = util.Interval(conf.get('repeat', '1m')) except util.IntervalError, ex: raise errors.ConfigError(conf, "Invalid repeat: %s" % ex) if self.repeat > repeat: self.repeat = repeat class SSLMixin(Query): """Mixin class for adding SSL support to a query. 
Note that subclasses must set self.conf['port'] Example usage: >>> class HTTPS(SSLMixin, HTTP): >>> pass """ def __init__(self, nagcat, conf): super(SSLMixin, self).__init__(nagcat, conf) if SSL is None: raise errors.InitError("pyOpenSSL is required for SSL support.") for opt in ('key', 'cert', 'cacert'): self.conf['ssl_'+opt] = conf.get('ssl_'+opt, None) key_type = str(conf.get('ssl_'+opt+'_type', '')) if not key_type or key_type.upper() == "PEM": key_type = crypto.FILETYPE_PEM elif key_type.upper() == "ASN1": key_type = crypto.FILETYPE_ASN1 else: raise errors.InitError("Invalid ssl_%s_type %r, " "must be 'PEM' or 'ASN1'" % (opt, key_type)) self.conf['ssl_%s_type'%opt] = key_type def maybe_read(key, private=False): filetype = self.conf[key+'_type'] path = self.conf[key] if not path: return None log.debug("Loading %s from %s", key, path) try: fd = open(path) try: data = fd.read() finally: fd.close() except IOError, ex: self.init_errors.append("Failed to read %s file %s: %s" % (key, path, ex.strerror)) return None log.trace("Loaded %s:\n%s", key, data) if private: return crypto.load_privatekey(filetype, data) else: return crypto.load_certificate(filetype, data) cacert = maybe_read('ssl_cacert') if cacert: cacert = [cacert] # Only use both if we can load both key = maybe_read('ssl_key', private=True) cert = maybe_read('ssl_cert') if not (key and cert): key, cert = None, None self.context = ssl.CertificateOptions( privateKey=key, certificate=cert, caCerts=cacert, verify=bool(cacert), method=SSL.SSLv23_METHOD) # Use SSLv23 to support v3 and TLSv1 but disable v2 (below) sslcontext = self.context.getContext() sslcontext.set_options(SSL.OP_NO_SSLv2) @errors.callback def _failure_tcp(self, result): """Also catch SSL errors""" result = super(SSLMixin, self)._failure_tcp(result) if isinstance(result.value, SSL.Error): raise errors.TestCritical("SSL Error: %s" % result.value) return result def _connect(self, factory): reactor.connectSSL(self.addr, self.conf['port'], factory, self.context, self.conf['timeout']) class FilteredQuery(Query): """A query that wraps another query and applies filters to it""" # For the scheduler stats name = "filter" def __init__(self, nagcat, conf): super(FilteredQuery, self).__init__(nagcat, conf) # Create the filter objects filter_list = conf.get('filters', []) for check in ('critical', 'warning', 'expectcritical', 'expectwarning', 'expecterror'): expr = conf.get(check, None) if expr: filter_list.append("%s:%s" % (check, expr)) self._filters = [filters.Filter(self, x) for x in filter_list] self._query = nagcat.new_query(conf) self.conf['filters'] = str(filter_list) self.conf['query'] = str(self._query) self.addDependency(self._query) def _start(self): self.saved.update(self._query.saved) deferred = defer.Deferred() deferred.callback(self._query.result) for filter in self._filters: if filter.handle_errors: deferred.addBoth(filter.filter) else: deferred.addCallback(filter.filter) return deferred
apache-2.0
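The protocol pinning in `SSLMixin` is a standard pyOpenSSL recipe: negotiate with `SSLv23_METHOD` (which, despite the name, selects the best mutually supported protocol) and then mask out SSLv2. The same two calls in isolation (requires pyOpenSSL):

from OpenSSL import SSL

ctx = SSL.Context(SSL.SSLv23_METHOD)
# permit SSLv3/TLS negotiation but never fall back to SSLv2
ctx.set_options(SSL.OP_NO_SSLv2)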
ZhouJiaLinmumu/Grasp-and-lift-EEG-challenge
lvl2/genEns.py
4
3742
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 15 14:12:12 2015

@author: rc, alex
"""
import os
import sys
if __name__ == '__main__' and __package__ is None:
    filePath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    sys.path.append(filePath)

import numpy as np
import yaml
from copy import deepcopy
from collections import OrderedDict
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import LeaveOneLabelOut

from preprocessing.aux import getEventNames
from utils.ensembles import createEnsFunc, loadPredictions, getLvl1ModelList

from ensembling.WeightedMean import WeightedMeanClassifier
from ensembling.NeuralNet import NeuralNet
from ensembling.XGB import XGB


def _from_yaml_to_func(method, params):
    """go from yaml to method. Need to be here for accessing local variables."""
    prm = dict()
    if params is not None:
        for key, val in params.iteritems():
            prm[key] = eval(str(val))
    return eval(method)(**prm)

# ## here read YAML and build models ###
yml = yaml.load(open(sys.argv[1]))

fileName = yml['Meta']['file']
if 'subsample' in yml['Meta']:
    subsample = yml['Meta']['subsample']
else:
    subsample = 1

modelName, modelParams = yml['Model'].iteritems().next()
model_base = _from_yaml_to_func(modelName, modelParams)

ensemble = yml['Model'][modelName]['ensemble']
addSubjectID = True if 'addSubjectID' in yml.keys() else False

mode = sys.argv[2]
if mode == 'val':
    test = False
elif mode == 'test':
    test = True
else:
    raise ValueError('Invalid mode. Please specify either val or test')

print('Running %s in mode %s, predictions will be saved as %s'
      % (modelName, mode, fileName))

######
cols = getEventNames()

ids = np.load('../infos_test.npy')
subjects_test = ids[:, 1]
series_test = ids[:, 2]
ids = ids[:, 0]
labels = np.load('../infos_val.npy')
subjects = labels[:, -2]
series = labels[:, -1]
labels = labels[:, :-2]

allCols = range(len(cols))

# ## loading predictions ###
files = getLvl1ModelList()

preds_val = OrderedDict()
for f in files:
    loadPredictions(preds_val, f[0], f[1])
# validity check
for m in ensemble:
    assert(m in preds_val)

# ## train/test ###
aggr = createEnsFunc(ensemble)
dataTrain = aggr(preds_val)
preds_val = None

# optionally adding subjectIDs
if addSubjectID:
    dataTrain = np.c_[dataTrain, subjects]

np.random.seed(4234521)

if test:
    # train the model
    model = deepcopy(model_base)
    model.fit(dataTrain[::subsample], labels[::subsample])
    dataTrain = None

    # load test data
    preds_test = OrderedDict()
    for f in files:
        loadPredictions(preds_test, f[0], f[1], test=True)
    dataTest = aggr(preds_test)
    preds_test = None

    # switch to add subjects
    if addSubjectID:
        dataTest = np.c_[dataTest, subjects_test]

    # get predictions
    p = model.predict_proba(dataTest)
    np.save('test/test_%s.npy' % fileName, [p])
else:
    auc_tot = []
    p = np.zeros(labels.shape)
    cv = LeaveOneLabelOut(series)
    for fold, (train, test) in enumerate(cv):
        model = deepcopy(model_base)
        if modelName == 'NeuralNet':
            # passing also test data to print out test error during training
            model.fit(dataTrain[train], labels[train],
                      dataTrain[test], labels[test])
        else:
            model.fit(dataTrain[train][::subsample],
                      labels[train][::subsample])
        p[test] = model.predict_proba(dataTrain[test])
        auc = [roc_auc_score(labels[test][:, col], p[test][:, col])
               for col in allCols]
        auc_tot.append(np.mean(auc))
        print('Fold %d, score: %.5f' % (fold, auc_tot[-1]))
    print('AUC: %.5f' % np.mean(auc_tot))
    np.save('val/val_%s.npy' % fileName, [p])
bsd-3-clause
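The validation branch above relies on `LeaveOneLabelOut` so that every fold holds out one whole recording series, keeping temporally correlated samples out of both sides of a split. A toy illustration of the fold structure, using the old `sklearn.cross_validation` module this script targets (modern scikit-learn renamed it `LeaveOneGroupOut`):

import numpy as np
from sklearn.cross_validation import LeaveOneLabelOut  # scikit-learn < 0.20

series = np.array([1, 1, 2, 2, 3, 3])   # one label per recording series
for fold, (train, test) in enumerate(LeaveOneLabelOut(series)):
    print(fold, train, test)
# fold 0 holds out series 1, fold 1 series 2, fold 2 series 3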
koushikcgit/xen-api
ocaml/idl/binding_sanity_checks/metrics.py
34
5011
#!/usr/bin/env python import XenAPI import sanitychecklib from pprint import pprint, pformat #Generally, we wish to announce the name of this file. #When running in the interpreter, however, this doesn't exist, and we #probably shouldn't log out either try: this_test_name = __file__ logout_after_test = True except NameError: this_test_name = "unknown" logout_after_test = False print "------------", this_test_name #given a list of dictionaries, print selected keys in order from each one, nicely formatted with a title def dictionary_list_partial_print(title, dictionary_list, keys ): bar='-' * len(title) print bar ,'\n', title ,'\n', bar print "\n--\n".join(["\n".join(["%s : %s" % (k, pformat( d[k] )) for k in keys]) for d in dictionary_list]) print bar #log in to the master print "logging in to ",sanitychecklib.server session=sanitychecklib.getsession() sx=session.xenapi #first, we'll find all the hosts, and get the information we care about from each hosts=sx.host.get_all() host_metrics=[{ "name_label" : sx.host.get_name_label(x), "metrics" : sx.host_metrics.get_record(sx.host.get_metrics(x)), "host_cpus" : [sx.host_cpu.get_record(x) for x in sx.host.get_host_CPUs(x)] } for x in hosts] #and print out the interesting bits dictionary_list_partial_print("Host Metrics", host_metrics, ["name_label","metrics", "host_cpus"]) # x, 'VM', 'guest_metrics' -> guest_metrics_record of the VM x # catch the NULL if the record doesn't exist for some reason, and return the string 'NULL' def fetch_metrics_record(object_reference, type_string, metrics_name): record_reference=sx.__getattr__(type_string).__getattr__('get_'+metrics_name)(object_reference) if record_reference=='OpaqueRef:NULL': return 'NULL' else: return sx.__getattr__(type_string+'_'+metrics_name).get_record(record_reference) #find all the virtual machines which are resident on the hosts resident_vms=set() for host in hosts: resident_vms.update(sx.host.get_resident_VMs(host)) #get and print their info vm_metrics = [{ "name_label" : sx.VM.get_name_label(x), "metrics" : fetch_metrics_record(x, 'VM', 'metrics'), "guest_metrics" : fetch_metrics_record(x, 'VM', 'guest_metrics'), } for x in resident_vms] dictionary_list_partial_print("Virtual Machine Metrics", vm_metrics, ["name_label", "metrics", "guest_metrics"]) #from the list of resident VMs we can find all the active VIFs and VBDs #however these don't have useful names, so we have to make them up active_vifs=[vif for vif in sx.VIF.get_all() if sx.VIF.get_VM(vif) in resident_vms] vif_metrics = [{ "name_label" : "VIF connecting \"%s\" to \"%s\"" % (sx.network.get_name_label(sx.VIF.get_network(x)), sx.VM.get_name_label(sx.VIF.get_VM(x))), "metrics" : fetch_metrics_record(x, 'VIF', 'metrics') } for x in active_vifs] dictionary_list_partial_print("VIF metrics", vif_metrics, ["name_label","metrics"]) #the names of the vbds are a little more complicated, because there is the possiblility that a VBD connects #a VM to a CD drive, which may be empty, and thus not have a VDI to represent it. 
def get_vbd_name(vbd): if sx.VBD.get_type(vbd)=="CD" and sx.VBD.get_empty(vbd)==True: device_name="empty cd drive" else: device_name=sx.VDI.get_name_label(sx.VBD.get_VDI(vbd)) return "VBD connecting \"%s\" to \"%s\"" % (sx.VM.get_name_label(sx.VBD.get_VM(vbd)), device_name) active_vbds=[vbd for vbd in sx.VBD.get_all() if sx.VBD.get_VM(vbd) in resident_vms] vbd_metrics = [{ "name_label" : get_vbd_name(x), "metrics" : fetch_metrics_record(x, 'VBD', 'metrics') } for x in active_vbds ] dictionary_list_partial_print("VBD Metrics", vbd_metrics, ["name_label","metrics"]) #from the VIFs we can find the active networks, which don't actually have any metrics active_networks=set() for vif in active_vifs: active_networks.add(sx.VIF.get_network(vif)) network_metrics=[{ "name_label": sx.network.get_name_label(x) } for x in active_networks] dictionary_list_partial_print("Network Metrics", network_metrics, ["name_label"]) #and from the active networks we can get all the relevant pifs active_pifs=set() for network in active_networks: active_pifs.update(sx.network.get_PIFs(network)) pif_metrics = [{ "name_label" : "%s on %s " % (sx.PIF.get_device(x), sx.host.get_name_label( sx.PIF.get_host(x) )), "metrics" : fetch_metrics_record(x, 'PIF', 'metrics') } for x in active_pifs ] dictionary_list_partial_print("PIF Metrics", pif_metrics, ["name_label","metrics"]) #finish off by printing out a concise list of all the active objects print "Active Objects" for x in ["host_metrics" , "vm_metrics", "vif_metrics", "vbd_metrics", "network_metrics", "pif_metrics" ]: print x, [(y['name_label']) for y in globals()[x]] #log out if logout_after_test: print "logging out" session.logout() print "End of------", this_test_name
lgpl-2.1
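`dictionary_list_partial_print` is the only formatting machinery in the script above; it is restated here with sample data so its output shape can be seen without a XenServer session (Python 2 syntax, matching the script):

from pprint import pformat

def dictionary_list_partial_print(title, dictionary_list, keys):
    # same helper as in metrics.py, restated so the demo runs standalone
    bar = '-' * len(title)
    print bar, '\n', title, '\n', bar
    print "\n--\n".join(["\n".join(["%s : %s" % (k, pformat(d[k]))
                                    for k in keys])
                         for d in dictionary_list])
    print bar

hosts = [{"name_label": "host-1", "metrics": {"memory_total": "8589934592"}}]
dictionary_list_partial_print("Host Metrics", hosts, ["name_label", "metrics"])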
SaschaMester/delicium
chrome/common/extensions/docs/server2/cache_chain_object_store.py
53
3339
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from future import All, Future from object_store import ObjectStore class CacheChainObjectStore(ObjectStore): '''Maintains an in-memory cache along with a chain of other object stores to try for the same keys. This is useful for implementing a multi-layered cache. The in-memory cache is inbuilt since it's synchronous, but the object store interface is asynchronous. The rules for the object store chain are: - When setting (or deleting) items, all object stores in the hierarcy will have that item set. - When getting items, the behaviour depends on |start_empty|. - If false, each object store is tried in order. The first object store to find the item will trickle back up, setting it on all object stores higher in the hierarchy. - If true, only the first in-memory cache is checked, as though the store had been initialized with no content as opposed to the union of its delegate stores. ''' def __init__(self, object_stores, start_empty=False): self._object_stores = object_stores self._start_empty = start_empty self._cache = {} def SetMulti(self, mapping): self._cache.update(mapping) return All([object_store.SetMulti(mapping) for object_store in self._object_stores]) def GetMulti(self, keys): missing_keys = list(keys) cached_items = {} for key in keys: if key in self._cache: cached_items[key] = self._cache.get(key) missing_keys.remove(key) if len(missing_keys) == 0 or self._start_empty: return Future(value=cached_items) object_store_futures = [(object_store, object_store.GetMulti(missing_keys)) for object_store in self._object_stores] def resolve(): # Approach: # # Try each object store in order, until there are no more missing keys. # Don't realise the Future value of an object store that we don't need to; # this is important e.g. to avoid querying data store constantly. # # When a value is found, cache it in all object stores further up the # chain, including the object-based cache on CacheChainObjectStore. object_store_updates = [] for object_store, object_store_future in object_store_futures: if len(missing_keys) == 0: break result = object_store_future.Get() for k, v in result.items(): # use items(); changes during iteration if v is None or k not in missing_keys: del result[k] continue self._cache[k] = v cached_items[k] = v missing_keys.remove(k) for _, updates in object_store_updates: updates.update(result) object_store_updates.append((object_store, {})) # Update the caches of all object stores that need it. for object_store, updates in object_store_updates: if updates: object_store.SetMulti(updates) return cached_items return Future(callback=resolve) def DelMulti(self, keys): for k in keys: self._cache.pop(k, None) for object_store in self._object_stores: object_store.DelMulti(keys)
bsd-3-clause
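The trickle-up rule in `GetMulti`'s resolver is easier to see without the `Future` plumbing. A synchronous sketch with stub stores (these classes are demo stand-ins, not Chromium's `ObjectStore`):

class DictStore(object):
    """Tiny stand-in for one layer in the chain."""
    def __init__(self, data=None):
        self.data = dict(data or {})

    def get(self, key):
        return self.data.get(key)

    def set(self, key, value):
        self.data[key] = value

def chain_get(stores, cache, key):
    # first hit wins; every store above the hit gets back-filled,
    # which is the "trickle back up" rule described in the docstring
    if key in cache:
        return cache[key]
    misses = []
    for store in stores:
        value = store.get(key)
        if value is not None:
            cache[key] = value
            for earlier in misses:
                earlier.set(key, value)
            return value
        misses.append(store)
    return None

memory, disk = DictStore(), DictStore({'k': 'v'})
cache = {}
print(chain_get([memory, disk], cache, 'k'))  # 'v', found in the disk layer
print(memory.get('k'))                        # 'v', back-filled into memory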
telwertowski/QGIS
tests/src/python/test_authmanager_ogr_postgres.py
15
8858
# -*- coding: utf-8 -*- """ Tests for auth manager Basic Auth access to postgres. This is an integration test for QGIS Desktop Auth Manager postgres provider that checks if QGIS can use a stored auth manager auth configuration to access a username/password protected postgres. Configuration from the environment: * QGIS_POSTGRES_SERVER_PORT (default: 55432) * QGIS_POSTGRES_EXECUTABLE_PATH (default: /usr/lib/postgresql/9.4/bin) From build dir, run: ctest -R PyQgsAuthManagerOgrPostgresTest -V or, if your PostgreSQL path differs from the default: QGIS_POSTGRES_EXECUTABLE_PATH=/usr/lib/postgresql/<your_version_goes_here>/bin \ ctest -R PyQgsAuthManagerOgrPostgresTest -V .. note:: This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. """ import os import time import signal import stat import subprocess import tempfile from shutil import rmtree from utilities import unitTestDataPath from qgis.core import ( QgsApplication, QgsAuthManager, QgsAuthMethodConfig, QgsVectorLayer, QgsDataSourceUri, QgsWkbTypes, ) from qgis.PyQt.QtNetwork import QSslCertificate from qgis.testing import ( start_app, unittest, ) __author__ = 'Alessandro Pasotti' __date__ = '03/11/2017' __copyright__ = 'Copyright 2017, The QGIS Project' QGIS_POSTGRES_SERVER_PORT = os.environ.get('QGIS_POSTGRES_SERVER_PORT', '55432') QGIS_POSTGRES_EXECUTABLE_PATH = os.environ.get('QGIS_POSTGRES_EXECUTABLE_PATH', '/usr/lib/postgresql/9.4/bin') assert os.path.exists(QGIS_POSTGRES_EXECUTABLE_PATH) QGIS_AUTH_DB_DIR_PATH = tempfile.mkdtemp() # Postgres test path QGIS_PG_TEST_PATH = tempfile.mkdtemp() os.environ['QGIS_AUTH_DB_DIR_PATH'] = QGIS_AUTH_DB_DIR_PATH qgis_app = start_app() QGIS_POSTGRES_CONF_TEMPLATE = """ hba_file = '%(tempfolder)s/pg_hba.conf' listen_addresses = '*' port = %(port)s max_connections = 100 unix_socket_directories = '%(tempfolder)s' ssl = true ssl_ciphers = 'DEFAULT:!LOW:!EXP:!MD5:@STRENGTH' # allowed SSL ciphers ssl_cert_file = '%(server_cert)s' ssl_key_file = '%(server_key)s' ssl_ca_file = '%(sslrootcert_path)s' password_encryption = on """ QGIS_POSTGRES_HBA_TEMPLATE = """ hostssl all all 0.0.0.0/0 md5 hostssl all all ::1/0 md5 host all all 127.0.0.1/32 trust host all all ::1/32 trust host all all 0.0.0.0/0 trust """ class TestAuthManager(unittest.TestCase): @classmethod def setUpAuth(cls): """Run before all tests and set up authentication""" authm = QgsApplication.authManager() assert (authm.setMasterPassword('masterpassword', True)) cls.pg_conf = os.path.join(cls.tempfolder, 'postgresql.conf') cls.pg_hba = os.path.join(cls.tempfolder, 'pg_hba.conf') # Client side cls.sslrootcert_path = os.path.join(cls.certsdata_path, 'chains_subissuer-issuer-root_issuer2-root2.pem') assert os.path.isfile(cls.sslrootcert_path) os.chmod(cls.sslrootcert_path, stat.S_IRUSR) cls.auth_config = QgsAuthMethodConfig("Basic") cls.auth_config.setConfig('username', cls.username) cls.auth_config.setConfig('password', cls.password) cls.auth_config.setName('test_basic_auth_config') cls.sslrootcert = QSslCertificate.fromPath(cls.sslrootcert_path) assert cls.sslrootcert is not None authm.storeCertAuthorities(cls.sslrootcert) authm.rebuildCaCertsCache() authm.rebuildTrustedCaCertsCache() authm.rebuildCertTrustCache() assert (authm.storeAuthenticationConfig(cls.auth_config)[0]) assert cls.auth_config.isValid() cls.authcfg = cls.auth_config.id() # Server side 
cls.server_cert = os.path.join(cls.certsdata_path, 'localhost_ssl_cert.pem') cls.server_key = os.path.join(cls.certsdata_path, 'localhost_ssl_key.pem') cls.server_rootcert = cls.sslrootcert_path os.chmod(cls.server_cert, stat.S_IRUSR) os.chmod(cls.server_key, stat.S_IRUSR) os.chmod(cls.server_rootcert, stat.S_IRUSR) # Place conf in the data folder with open(cls.pg_conf, 'w+') as f: f.write(QGIS_POSTGRES_CONF_TEMPLATE % { 'port': cls.port, 'tempfolder': cls.tempfolder, 'server_cert': cls.server_cert, 'server_key': cls.server_key, 'sslrootcert_path': cls.sslrootcert_path, }) with open(cls.pg_hba, 'w+') as f: f.write(QGIS_POSTGRES_HBA_TEMPLATE) @classmethod def setUpClass(cls): """Run before all tests: Creates an auth configuration""" cls.port = QGIS_POSTGRES_SERVER_PORT cls.username = 'username' cls.password = 'password' cls.dbname = 'test_basic' cls.tempfolder = QGIS_PG_TEST_PATH cls.certsdata_path = os.path.join(unitTestDataPath('auth_system'), 'certs_keys') cls.hostname = 'localhost' cls.data_path = os.path.join(cls.tempfolder, 'data') os.mkdir(cls.data_path) cls.setUpAuth() subprocess.check_call([os.path.join(QGIS_POSTGRES_EXECUTABLE_PATH, 'initdb'), '-D', cls.data_path]) # Disable SSL verification for setup operations env = dict(os.environ) env['PGSSLMODE'] = 'disable' cls.server = subprocess.Popen([os.path.join(QGIS_POSTGRES_EXECUTABLE_PATH, 'postgres'), '-D', cls.data_path, '-c', "config_file=%s" % cls.pg_conf], env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Wait max 10 secs for the server to start end = time.time() + 10 while True: line = cls.server.stderr.readline() print(line) if line.find(b"database system is ready to accept") != -1: break if time.time() > end: raise Exception("Timeout connecting to PostgreSQL") # Create a DB subprocess.check_call([os.path.join(QGIS_POSTGRES_EXECUTABLE_PATH, 'createdb'), '-h', 'localhost', '-p', cls.port, 'test_basic'], env=env) # Inject test SQL from test path test_sql = os.path.join(unitTestDataPath('provider'), 'testdata_pg.sql') subprocess.check_call([os.path.join(QGIS_POSTGRES_EXECUTABLE_PATH, 'psql'), '-h', 'localhost', '-p', cls.port, '-f', test_sql, cls.dbname], env=env) # Create a role subprocess.check_call([os.path.join(QGIS_POSTGRES_EXECUTABLE_PATH, 'psql'), '-h', 'localhost', '-p', cls.port, '-c', 'CREATE ROLE "%s" WITH SUPERUSER LOGIN PASSWORD \'%s\'' % (cls.username, cls.password), cls.dbname], env=env) @classmethod def tearDownClass(cls): """Run after all tests""" cls.server.terminate() os.kill(cls.server.pid, signal.SIGABRT) del cls.server time.sleep(2) rmtree(QGIS_AUTH_DB_DIR_PATH) rmtree(cls.tempfolder) def setUp(self): """Run before each test.""" pass def tearDown(self): """Run after each test.""" pass @classmethod def _getPostGISLayer(cls, type_name, layer_name=None, authcfg=''): """ PG layer factory """ if layer_name is None: layer_name = 'pg_' + type_name # Warning: OGR needs the schema if it's not the default, so qgis_test.someData connstring = "PG:dbname='%(dbname)s' host='%(hostname)s' port='%(port)s' sslmode='verify-full' sslrootcert='%(sslrootcert)s'%(authcfg)s|layername=qgis_test.someData" % ( { 'dbname': cls.dbname, 'hostname': cls.hostname, 'port': cls.port, 'authcfg': ' authcfg=\'%s\'' % authcfg if authcfg else '', 'sslrootcert': cls.sslrootcert_path, } ) layer = QgsVectorLayer(connstring, layer_name, 'ogr') return layer def testValidAuthAccess(self): """ Access the protected layer with valid credentials """ pg_layer = self._getPostGISLayer('testlayer_èé', authcfg=self.auth_config.id()) 
        self.assertTrue(pg_layer.isValid())

    def testInvalidAuthAccess(self):
        """
        Access the protected layer with invalid credentials
        """
        pg_layer = self._getPostGISLayer('testlayer_èé')
        self.assertFalse(pg_layer.isValid())


if __name__ == '__main__':
    unittest.main()
gpl-2.0
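As context for the record above, a minimal sketch of the auth-manager pattern the test exercises: store a Basic auth configuration once, then reference it by id ('authcfg') from the layer connection string so credentials never appear in the URI. The database name, host, and port mirror the test's defaults and are assumptions, not fixed values.

from qgis.core import QgsApplication, QgsAuthMethodConfig, QgsVectorLayer

# Assumes a running QGIS application (e.g. created via qgis.testing.start_app()).
authm = QgsApplication.authManager()
authm.setMasterPassword('masterpassword', True)

cfg = QgsAuthMethodConfig("Basic")
cfg.setConfig('username', 'username')
cfg.setConfig('password', 'password')
cfg.setName('example_basic_auth')   # hypothetical config name
authm.storeAuthenticationConfig(cfg)

# QGIS expands authcfg='...' at connection time, injecting the stored credentials.
connstring = ("PG:dbname='test_basic' host='localhost' port='55432' "
              "authcfg='%s'|layername=qgis_test.someData" % cfg.id())
layer = QgsVectorLayer(connstring, 'example_layer', 'ogr')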
mojwang/selenium
py/selenium/webdriver/common/keys.py
53
2347
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

"""
The Keys implementation.
"""

from __future__ import unicode_literals


class Keys(object):
    """
    Set of special key codes.
    """

    NULL = '\ue000'
    CANCEL = '\ue001'  # ^break
    HELP = '\ue002'
    BACKSPACE = '\ue003'
    BACK_SPACE = BACKSPACE
    TAB = '\ue004'
    CLEAR = '\ue005'
    RETURN = '\ue006'
    ENTER = '\ue007'
    SHIFT = '\ue008'
    LEFT_SHIFT = SHIFT
    CONTROL = '\ue009'
    LEFT_CONTROL = CONTROL
    ALT = '\ue00a'
    LEFT_ALT = ALT
    PAUSE = '\ue00b'
    ESCAPE = '\ue00c'
    SPACE = '\ue00d'
    PAGE_UP = '\ue00e'
    PAGE_DOWN = '\ue00f'
    END = '\ue010'
    HOME = '\ue011'
    LEFT = '\ue012'
    ARROW_LEFT = LEFT
    UP = '\ue013'
    ARROW_UP = UP
    RIGHT = '\ue014'
    ARROW_RIGHT = RIGHT
    DOWN = '\ue015'
    ARROW_DOWN = DOWN
    INSERT = '\ue016'
    DELETE = '\ue017'
    SEMICOLON = '\ue018'
    EQUALS = '\ue019'

    NUMPAD0 = '\ue01a'  # number pad keys
    NUMPAD1 = '\ue01b'
    NUMPAD2 = '\ue01c'
    NUMPAD3 = '\ue01d'
    NUMPAD4 = '\ue01e'
    NUMPAD5 = '\ue01f'
    NUMPAD6 = '\ue020'
    NUMPAD7 = '\ue021'
    NUMPAD8 = '\ue022'
    NUMPAD9 = '\ue023'
    MULTIPLY = '\ue024'
    ADD = '\ue025'
    SEPARATOR = '\ue026'
    SUBTRACT = '\ue027'
    DECIMAL = '\ue028'
    DIVIDE = '\ue029'

    F1 = '\ue031'  # function keys
    F2 = '\ue032'
    F3 = '\ue033'
    F4 = '\ue034'
    F5 = '\ue035'
    F6 = '\ue036'
    F7 = '\ue037'
    F8 = '\ue038'
    F9 = '\ue039'
    F10 = '\ue03a'
    F11 = '\ue03b'
    F12 = '\ue03c'

    META = '\ue03d'
    COMMAND = '\ue03d'
apache-2.0
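A short usage sketch for the key codes defined above, using the selenium APIs of this file's era; the page URL and element name are illustrative assumptions.

from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys

driver = webdriver.Firefox()  # any WebDriver backend works the same way
driver.get('https://www.example.com')

# Keys constants are just unicode code points in the Private Use Area;
# send_keys() transmits them like ordinary characters.
field = driver.find_element_by_name('q')  # hypothetical input field
field.send_keys('selenium', Keys.ENTER)   # Keys.ENTER == '\ue007'

# Modifier chords are usually expressed with ActionChains:
ActionChains(driver).key_down(Keys.CONTROL).send_keys('a').key_up(Keys.CONTROL).perform()

driver.quit()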
samba-team/samba
third_party/waf/waflib/Tools/ccroot.py
40
26260
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) """ Classes and methods shared by tools providing support for C-like language such as C/C++/D/Assembly/Go (this support module is almost never used alone). """ import os, re from waflib import Task, Utils, Node, Errors, Logs from waflib.TaskGen import after_method, before_method, feature, taskgen_method, extension from waflib.Tools import c_aliases, c_preproc, c_config, c_osx, c_tests from waflib.Configure import conf SYSTEM_LIB_PATHS = ['/usr/lib64', '/usr/lib', '/usr/local/lib64', '/usr/local/lib'] USELIB_VARS = Utils.defaultdict(set) """ Mapping for features to :py:class:`waflib.ConfigSet.ConfigSet` variables. See :py:func:`waflib.Tools.ccroot.propagate_uselib_vars`. """ USELIB_VARS['c'] = set(['INCLUDES', 'FRAMEWORKPATH', 'DEFINES', 'CPPFLAGS', 'CCDEPS', 'CFLAGS', 'ARCH']) USELIB_VARS['cxx'] = set(['INCLUDES', 'FRAMEWORKPATH', 'DEFINES', 'CPPFLAGS', 'CXXDEPS', 'CXXFLAGS', 'ARCH']) USELIB_VARS['d'] = set(['INCLUDES', 'DFLAGS']) USELIB_VARS['includes'] = set(['INCLUDES', 'FRAMEWORKPATH', 'ARCH']) USELIB_VARS['cprogram'] = USELIB_VARS['cxxprogram'] = set(['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS', 'FRAMEWORK', 'FRAMEWORKPATH', 'ARCH', 'LDFLAGS']) USELIB_VARS['cshlib'] = USELIB_VARS['cxxshlib'] = set(['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS', 'FRAMEWORK', 'FRAMEWORKPATH', 'ARCH', 'LDFLAGS']) USELIB_VARS['cstlib'] = USELIB_VARS['cxxstlib'] = set(['ARFLAGS', 'LINKDEPS']) USELIB_VARS['dprogram'] = set(['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS']) USELIB_VARS['dshlib'] = set(['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS']) USELIB_VARS['dstlib'] = set(['ARFLAGS', 'LINKDEPS']) USELIB_VARS['asm'] = set(['ASFLAGS']) # ================================================================================================= @taskgen_method def create_compiled_task(self, name, node): """ Create the compilation task: c, cxx, asm, etc. The output node is created automatically (object file with a typical **.o** extension). The task is appended to the list *compiled_tasks* which is then used by :py:func:`waflib.Tools.ccroot.apply_link` :param name: name of the task class :type name: string :param node: the file to compile :type node: :py:class:`waflib.Node.Node` :return: The task created :rtype: :py:class:`waflib.Task.Task` """ out = '%s.%d.o' % (node.name, self.idx) task = self.create_task(name, node, node.parent.find_or_declare(out)) try: self.compiled_tasks.append(task) except AttributeError: self.compiled_tasks = [task] return task @taskgen_method def to_incnodes(self, inlst): """ Task generator method provided to convert a list of string/nodes into a list of includes folders. The paths are assumed to be relative to the task generator path, except if they begin by **#** in which case they are searched from the top-level directory (``bld.srcnode``). The folders are simply assumed to be existing. The node objects in the list are returned in the output list. The strings are converted into node objects if possible. The node is searched from the source directory, and if a match is found, the equivalent build directory is created and added to the returned list too. When a folder cannot be found, it is ignored. 
:param inlst: list of folders :type inlst: space-delimited string or a list of string/nodes :rtype: list of :py:class:`waflib.Node.Node` :return: list of include folders as nodes """ lst = [] seen = set() for x in self.to_list(inlst): if x in seen or not x: continue seen.add(x) # with a real lot of targets, it is sometimes interesting to cache the results below if isinstance(x, Node.Node): lst.append(x) else: if os.path.isabs(x): lst.append(self.bld.root.make_node(x) or x) else: if x[0] == '#': p = self.bld.bldnode.make_node(x[1:]) v = self.bld.srcnode.make_node(x[1:]) else: p = self.path.get_bld().make_node(x) v = self.path.make_node(x) if p.is_child_of(self.bld.bldnode): p.mkdir() lst.append(p) lst.append(v) return lst @feature('c', 'cxx', 'd', 'asm', 'fc', 'includes') @after_method('propagate_uselib_vars', 'process_source') def apply_incpaths(self): """ Task generator method that processes the attribute *includes*:: tg = bld(features='includes', includes='.') The folders only need to be relative to the current directory, the equivalent build directory is added automatically (for headers created in the build directory). This enables using a build directory or not (``top == out``). This method will add a list of nodes read by :py:func:`waflib.Tools.ccroot.to_incnodes` in ``tg.env.INCPATHS``, and the list of include paths in ``tg.env.INCLUDES``. """ lst = self.to_incnodes(self.to_list(getattr(self, 'includes', [])) + self.env.INCLUDES) self.includes_nodes = lst cwd = self.get_cwd() self.env.INCPATHS = [x.path_from(cwd) for x in lst] class link_task(Task.Task): """ Base class for all link tasks. A task generator is supposed to have at most one link task bound in the attribute *link_task*. See :py:func:`waflib.Tools.ccroot.apply_link`. .. inheritance-diagram:: waflib.Tools.ccroot.stlink_task waflib.Tools.c.cprogram waflib.Tools.c.cshlib waflib.Tools.cxx.cxxstlib waflib.Tools.cxx.cxxprogram waflib.Tools.cxx.cxxshlib waflib.Tools.d.dprogram waflib.Tools.d.dshlib waflib.Tools.d.dstlib waflib.Tools.ccroot.fake_shlib waflib.Tools.ccroot.fake_stlib waflib.Tools.asm.asmprogram waflib.Tools.asm.asmshlib waflib.Tools.asm.asmstlib """ color = 'YELLOW' weight = 3 """Try to process link tasks as early as possible""" inst_to = None """Default installation path for the link task outputs, or None to disable""" chmod = Utils.O755 """Default installation mode for the link task outputs""" def add_target(self, target): """ Process the *target* attribute to add the platform-specific prefix/suffix such as *.so* or *.exe*. The settings are retrieved from ``env.clsname_PATTERN`` """ if isinstance(target, str): base = self.generator.path if target.startswith('#'): # for those who like flat structures target = target[1:] base = self.generator.bld.bldnode pattern = self.env[self.__class__.__name__ + '_PATTERN'] if not pattern: pattern = '%s' folder, name = os.path.split(target) if self.__class__.__name__.find('shlib') > 0 and getattr(self.generator, 'vnum', None): nums = self.generator.vnum.split('.') if self.env.DEST_BINFMT == 'pe': # include the version in the dll file name, # the import lib file name stays unversioned. 
name = name + '-' + nums[0] elif self.env.DEST_OS == 'openbsd': pattern = '%s.%s' % (pattern, nums[0]) if len(nums) >= 2: pattern += '.%s' % nums[1] if folder: tmp = folder + os.sep + pattern % name else: tmp = pattern % name target = base.find_or_declare(tmp) self.set_outputs(target) def exec_command(self, *k, **kw): ret = super(link_task, self).exec_command(*k, **kw) if not ret and self.env.DO_MANIFEST: ret = self.exec_mf() return ret def exec_mf(self): """ Create manifest files for VS-like compilers (msvc, ifort, ...) """ if not self.env.MT: return 0 manifest = None for out_node in self.outputs: if out_node.name.endswith('.manifest'): manifest = out_node.abspath() break else: # Should never get here. If we do, it means the manifest file was # never added to the outputs list, thus we don't have a manifest file # to embed, so we just return. return 0 # embedding mode. Different for EXE's and DLL's. # see: http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx mode = '' for x in Utils.to_list(self.generator.features): if x in ('cprogram', 'cxxprogram', 'fcprogram', 'fcprogram_test'): mode = 1 elif x in ('cshlib', 'cxxshlib', 'fcshlib'): mode = 2 Logs.debug('msvc: embedding manifest in mode %r', mode) lst = [] + self.env.MT lst.extend(Utils.to_list(self.env.MTFLAGS)) lst.extend(['-manifest', manifest]) lst.append('-outputresource:%s;%s' % (self.outputs[0].abspath(), mode)) return super(link_task, self).exec_command(lst) class stlink_task(link_task): """ Base for static link tasks, which use *ar* most of the time. The target is always removed before being written. """ run_str = '${AR} ${ARFLAGS} ${AR_TGT_F}${TGT} ${AR_SRC_F}${SRC}' chmod = Utils.O644 """Default installation mode for the static libraries""" def rm_tgt(cls): old = cls.run def wrap(self): try: os.remove(self.outputs[0].abspath()) except OSError: pass return old(self) setattr(cls, 'run', wrap) rm_tgt(stlink_task) @feature('skip_stlib_link_deps') @before_method('process_use') def apply_skip_stlib_link_deps(self): """ This enables an optimization in the :py:func:wafilb.Tools.ccroot.processes_use: method that skips dependency and link flag optimizations for targets that generate static libraries (via the :py:class:Tools.ccroot.stlink_task task). The actual behavior is implemented in :py:func:wafilb.Tools.ccroot.processes_use: method so this feature only tells waf to enable the new behavior. """ self.env.SKIP_STLIB_LINK_DEPS = True @feature('c', 'cxx', 'd', 'fc', 'asm') @after_method('process_source') def apply_link(self): """ Collect the tasks stored in ``compiled_tasks`` (created by :py:func:`waflib.Tools.ccroot.create_compiled_task`), and use the outputs for a new instance of :py:class:`waflib.Tools.ccroot.link_task`. 
The class to use is the first link task matching a name from the attribute *features*, for example:: def build(bld): tg = bld(features='cxx cxxprogram cprogram', source='main.c', target='app') will create the task ``tg.link_task`` as a new instance of :py:class:`waflib.Tools.cxx.cxxprogram` """ for x in self.features: if x == 'cprogram' and 'cxx' in self.features: # limited compat x = 'cxxprogram' elif x == 'cshlib' and 'cxx' in self.features: x = 'cxxshlib' if x in Task.classes: if issubclass(Task.classes[x], link_task): link = x break else: return objs = [t.outputs[0] for t in getattr(self, 'compiled_tasks', [])] self.link_task = self.create_task(link, objs) self.link_task.add_target(self.target) # remember that the install paths are given by the task generators try: inst_to = self.install_path except AttributeError: inst_to = self.link_task.inst_to if inst_to: # install a copy of the node list we have at this moment (implib not added) self.install_task = self.add_install_files( install_to=inst_to, install_from=self.link_task.outputs[:], chmod=self.link_task.chmod, task=self.link_task) @taskgen_method def use_rec(self, name, **kw): """ Processes the ``use`` keyword recursively. This method is kind of private and only meant to be used from ``process_use`` """ if name in self.tmp_use_not or name in self.tmp_use_seen: return try: y = self.bld.get_tgen_by_name(name) except Errors.WafError: self.uselib.append(name) self.tmp_use_not.add(name) return self.tmp_use_seen.append(name) y.post() # bind temporary attributes on the task generator y.tmp_use_objects = objects = kw.get('objects', True) y.tmp_use_stlib = stlib = kw.get('stlib', True) try: link_task = y.link_task except AttributeError: y.tmp_use_var = '' else: objects = False if not isinstance(link_task, stlink_task): stlib = False y.tmp_use_var = 'LIB' else: y.tmp_use_var = 'STLIB' p = self.tmp_use_prec for x in self.to_list(getattr(y, 'use', [])): if self.env["STLIB_" + x]: continue try: p[x].append(name) except KeyError: p[x] = [name] self.use_rec(x, objects=objects, stlib=stlib) @feature('c', 'cxx', 'd', 'use', 'fc') @before_method('apply_incpaths', 'propagate_uselib_vars') @after_method('apply_link', 'process_source') def process_use(self): """ Process the ``use`` attribute which contains a list of task generator names:: def build(bld): bld.shlib(source='a.c', target='lib1') bld.program(source='main.c', target='app', use='lib1') See :py:func:`waflib.Tools.ccroot.use_rec`. 
""" use_not = self.tmp_use_not = set() self.tmp_use_seen = [] # we would like an ordered set use_prec = self.tmp_use_prec = {} self.uselib = self.to_list(getattr(self, 'uselib', [])) self.includes = self.to_list(getattr(self, 'includes', [])) names = self.to_list(getattr(self, 'use', [])) for x in names: self.use_rec(x) for x in use_not: if x in use_prec: del use_prec[x] # topological sort out = self.tmp_use_sorted = [] tmp = [] for x in self.tmp_use_seen: for k in use_prec.values(): if x in k: break else: tmp.append(x) while tmp: e = tmp.pop() out.append(e) try: nlst = use_prec[e] except KeyError: pass else: del use_prec[e] for x in nlst: for y in use_prec: if x in use_prec[y]: break else: tmp.append(x) if use_prec: raise Errors.WafError('Cycle detected in the use processing %r' % use_prec) out.reverse() link_task = getattr(self, 'link_task', None) for x in out: y = self.bld.get_tgen_by_name(x) var = y.tmp_use_var if var and link_task: if self.env.SKIP_STLIB_LINK_DEPS and isinstance(link_task, stlink_task): # If the skip_stlib_link_deps feature is enabled then we should # avoid adding lib deps to the stlink_task instance. pass elif var == 'LIB' or y.tmp_use_stlib or x in names: self.env.append_value(var, [y.target[y.target.rfind(os.sep) + 1:]]) self.link_task.dep_nodes.extend(y.link_task.outputs) tmp_path = y.link_task.outputs[0].parent.path_from(self.get_cwd()) self.env.append_unique(var + 'PATH', [tmp_path]) else: if y.tmp_use_objects: self.add_objects_from_tgen(y) if getattr(y, 'export_includes', None): # self.includes may come from a global variable #2035 self.includes = self.includes + y.to_incnodes(y.export_includes) if getattr(y, 'export_defines', None): self.env.append_value('DEFINES', self.to_list(y.export_defines)) # and finally, add the use variables (no recursion needed) for x in names: try: y = self.bld.get_tgen_by_name(x) except Errors.WafError: if not self.env['STLIB_' + x] and not x in self.uselib: self.uselib.append(x) else: for k in self.to_list(getattr(y, 'use', [])): if not self.env['STLIB_' + k] and not k in self.uselib: self.uselib.append(k) @taskgen_method def accept_node_to_link(self, node): """ PRIVATE INTERNAL USE ONLY """ return not node.name.endswith('.pdb') @taskgen_method def add_objects_from_tgen(self, tg): """ Add the objects from the depending compiled tasks as link task inputs. Some objects are filtered: for instance, .pdb files are added to the compiled tasks but not to the link tasks (to avoid errors) PRIVATE INTERNAL USE ONLY """ try: link_task = self.link_task except AttributeError: pass else: for tsk in getattr(tg, 'compiled_tasks', []): for x in tsk.outputs: if self.accept_node_to_link(x): link_task.inputs.append(x) @taskgen_method def get_uselib_vars(self): """ :return: the *uselib* variables associated to the *features* attribute (see :py:attr:`waflib.Tools.ccroot.USELIB_VARS`) :rtype: list of string """ _vars = set() for x in self.features: if x in USELIB_VARS: _vars |= USELIB_VARS[x] return _vars @feature('c', 'cxx', 'd', 'fc', 'javac', 'cs', 'uselib', 'asm') @after_method('process_use') def propagate_uselib_vars(self): """ Process uselib variables for adding flags. 
For example, the following target:: def build(bld): bld.env.AFLAGS_aaa = ['bar'] from waflib.Tools.ccroot import USELIB_VARS USELIB_VARS['aaa'] = ['AFLAGS'] tg = bld(features='aaa', aflags='test') The *aflags* attribute will be processed and this method will set:: tg.env.AFLAGS = ['bar', 'test'] """ _vars = self.get_uselib_vars() env = self.env app = env.append_value feature_uselib = self.features + self.to_list(getattr(self, 'uselib', [])) for var in _vars: y = var.lower() val = getattr(self, y, []) if val: app(var, self.to_list(val)) for x in feature_uselib: val = env['%s_%s' % (var, x)] if val: app(var, val) # ============ the code above must not know anything about import libs ========== @feature('cshlib', 'cxxshlib', 'fcshlib') @after_method('apply_link') def apply_implib(self): """ Handle dlls and their import libs on Windows-like systems. A ``.dll.a`` file called *import library* is generated. It must be installed as it is required for linking the library. """ if not self.env.DEST_BINFMT == 'pe': return dll = self.link_task.outputs[0] if isinstance(self.target, Node.Node): name = self.target.name else: name = os.path.split(self.target)[1] implib = self.env.implib_PATTERN % name implib = dll.parent.find_or_declare(implib) self.env.append_value('LINKFLAGS', self.env.IMPLIB_ST % implib.bldpath()) self.link_task.outputs.append(implib) if getattr(self, 'defs', None) and self.env.DEST_BINFMT == 'pe': node = self.path.find_resource(self.defs) if not node: raise Errors.WafError('invalid def file %r' % self.defs) if self.env.def_PATTERN: self.env.append_value('LINKFLAGS', self.env.def_PATTERN % node.path_from(self.get_cwd())) self.link_task.dep_nodes.append(node) else: # gcc for windows takes *.def file as input without any special flag self.link_task.inputs.append(node) # where to put the import library if getattr(self, 'install_task', None): try: # user has given a specific installation path for the import library inst_to = self.install_path_implib except AttributeError: try: # user has given an installation path for the main library, put the import library in it inst_to = self.install_path except AttributeError: # else, put the library in BINDIR and the import library in LIBDIR inst_to = '${IMPLIBDIR}' self.install_task.install_to = '${BINDIR}' if not self.env.IMPLIBDIR: self.env.IMPLIBDIR = self.env.LIBDIR self.implib_install_task = self.add_install_files(install_to=inst_to, install_from=implib, chmod=self.link_task.chmod, task=self.link_task) # ============ the code above must not know anything about vnum processing on unix platforms ========= re_vnum = re.compile('^([1-9]\\d*|0)([.]([1-9]\\d*|0)){0,2}?$') @feature('cshlib', 'cxxshlib', 'dshlib', 'fcshlib', 'vnum') @after_method('apply_link', 'propagate_uselib_vars') def apply_vnum(self): """ Enforce version numbering on shared libraries. The valid version numbers must have either zero or two dots:: def build(bld): bld.shlib(source='a.c', target='foo', vnum='14.15.16') In this example on Linux platform, ``libfoo.so`` is installed as ``libfoo.so.14.15.16``, and the following symbolic links are created: * ``libfoo.so → libfoo.so.14.15.16`` * ``libfoo.so.14 → libfoo.so.14.15.16`` By default, the library will be assigned SONAME ``libfoo.so.14``, effectively declaring ABI compatibility between all minor and patch releases for the major version of the library. 
When necessary, the compatibility can be explicitly defined using `cnum` parameter: def build(bld): bld.shlib(source='a.c', target='foo', vnum='14.15.16', cnum='14.15') In this case, the assigned SONAME will be ``libfoo.so.14.15`` with ABI compatibility only between path releases for a specific major and minor version of the library. On OS X platform, install-name parameter will follow the above logic for SONAME with exception that it also specifies an absolute path (based on install_path) of the library. """ if not getattr(self, 'vnum', '') or os.name != 'posix' or self.env.DEST_BINFMT not in ('elf', 'mac-o'): return link = self.link_task if not re_vnum.match(self.vnum): raise Errors.WafError('Invalid vnum %r for target %r' % (self.vnum, getattr(self, 'name', self))) nums = self.vnum.split('.') node = link.outputs[0] cnum = getattr(self, 'cnum', str(nums[0])) cnums = cnum.split('.') if len(cnums)>len(nums) or nums[0:len(cnums)] != cnums: raise Errors.WafError('invalid compatibility version %s' % cnum) libname = node.name if libname.endswith('.dylib'): name3 = libname.replace('.dylib', '.%s.dylib' % self.vnum) name2 = libname.replace('.dylib', '.%s.dylib' % cnum) else: name3 = libname + '.' + self.vnum name2 = libname + '.' + cnum # add the so name for the ld linker - to disable, just unset env.SONAME_ST if self.env.SONAME_ST: v = self.env.SONAME_ST % name2 self.env.append_value('LINKFLAGS', v.split()) # the following task is just to enable execution from the build dir :-/ if self.env.DEST_OS != 'openbsd': outs = [node.parent.make_node(name3)] if name2 != name3: outs.append(node.parent.make_node(name2)) self.create_task('vnum', node, outs) if getattr(self, 'install_task', None): self.install_task.hasrun = Task.SKIPPED self.install_task.no_errcheck_out = True path = self.install_task.install_to if self.env.DEST_OS == 'openbsd': libname = self.link_task.outputs[0].name t1 = self.add_install_as(install_to='%s/%s' % (path, libname), install_from=node, chmod=self.link_task.chmod) self.vnum_install_task = (t1,) else: t1 = self.add_install_as(install_to=path + os.sep + name3, install_from=node, chmod=self.link_task.chmod) t3 = self.add_symlink_as(install_to=path + os.sep + libname, install_from=name3) if name2 != name3: t2 = self.add_symlink_as(install_to=path + os.sep + name2, install_from=name3) self.vnum_install_task = (t1, t2, t3) else: self.vnum_install_task = (t1, t3) if '-dynamiclib' in self.env.LINKFLAGS: # this requires after(propagate_uselib_vars) try: inst_to = self.install_path except AttributeError: inst_to = self.link_task.inst_to if inst_to: p = Utils.subst_vars(inst_to, self.env) path = os.path.join(p, name2) self.env.append_value('LINKFLAGS', ['-install_name', path]) self.env.append_value('LINKFLAGS', '-Wl,-compatibility_version,%s' % cnum) self.env.append_value('LINKFLAGS', '-Wl,-current_version,%s' % self.vnum) class vnum(Task.Task): """ Create the symbolic links for a versioned shared library. 
Instances are created by :py:func:`waflib.Tools.ccroot.apply_vnum` """ color = 'CYAN' ext_in = ['.bin'] def keyword(self): return 'Symlinking' def run(self): for x in self.outputs: path = x.abspath() try: os.remove(path) except OSError: pass try: os.symlink(self.inputs[0].name, path) except OSError: return 1 class fake_shlib(link_task): """ Task used for reading a system library and adding the dependency on it """ def runnable_status(self): for t in self.run_after: if not t.hasrun: return Task.ASK_LATER return Task.SKIP_ME class fake_stlib(stlink_task): """ Task used for reading a system library and adding the dependency on it """ def runnable_status(self): for t in self.run_after: if not t.hasrun: return Task.ASK_LATER return Task.SKIP_ME @conf def read_shlib(self, name, paths=[], export_includes=[], export_defines=[]): """ Read a system shared library, enabling its use as a local library. Will trigger a rebuild if the file changes:: def build(bld): bld.read_shlib('m') bld.program(source='main.c', use='m') """ return self(name=name, features='fake_lib', lib_paths=paths, lib_type='shlib', export_includes=export_includes, export_defines=export_defines) @conf def read_stlib(self, name, paths=[], export_includes=[], export_defines=[]): """ Read a system static library, enabling a use as a local library. Will trigger a rebuild if the file changes. """ return self(name=name, features='fake_lib', lib_paths=paths, lib_type='stlib', export_includes=export_includes, export_defines=export_defines) lib_patterns = { 'shlib' : ['lib%s.so', '%s.so', 'lib%s.dylib', 'lib%s.dll', '%s.dll'], 'stlib' : ['lib%s.a', '%s.a', 'lib%s.dll', '%s.dll', 'lib%s.lib', '%s.lib'], } @feature('fake_lib') def process_lib(self): """ Find the location of a foreign library. Used by :py:class:`waflib.Tools.ccroot.read_shlib` and :py:class:`waflib.Tools.ccroot.read_stlib`. """ node = None names = [x % self.name for x in lib_patterns[self.lib_type]] for x in self.lib_paths + [self.path] + SYSTEM_LIB_PATHS: if not isinstance(x, Node.Node): x = self.bld.root.find_node(x) or self.path.find_node(x) if not x: continue for y in names: node = x.find_node(y) if node: try: Utils.h_file(node.abspath()) except EnvironmentError: raise ValueError('Could not read %r' % y) break else: continue break else: raise Errors.WafError('could not find library %r' % self.name) self.link_task = self.create_task('fake_%s' % self.lib_type, [], [node]) self.target = self.name class fake_o(Task.Task): def runnable_status(self): return Task.SKIP_ME @extension('.o', '.obj') def add_those_o_files(self, node): tsk = self.create_task('fake_o', [], node) try: self.compiled_tasks.append(tsk) except AttributeError: self.compiled_tasks = [tsk] @feature('fake_obj') @before_method('process_source') def process_objs(self): """ Puts object files in the task generator outputs """ for node in self.to_nodes(self.source): self.add_those_o_files(node) self.source = [] @conf def read_object(self, obj): """ Read an object file, enabling injection in libs/programs. Will trigger a rebuild if the file changes. 
:param obj: object file path, as string or Node """ if not isinstance(obj, self.path.__class__): obj = self.path.find_resource(obj) return self(features='fake_obj', source=obj, name=obj.name) @feature('cxxprogram', 'cprogram') @after_method('apply_link', 'process_use') def set_full_paths_hpux(self): """ On hp-ux, extend the libpaths and static library paths to absolute paths """ if self.env.DEST_OS != 'hp-ux': return base = self.bld.bldnode.abspath() for var in ['LIBPATH', 'STLIBPATH']: lst = [] for x in self.env[var]: if x.startswith('/'): lst.append(x) else: lst.append(os.path.normpath(os.path.join(base, x))) self.env[var] = lst
gpl-3.0
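To see how the task generator methods above fit together, an illustrative wscript build function (targets and sources are made up), drawing on the examples embedded in the file's own docstrings: read_shlib() for system libraries, vnum for apply_vnum(), and use for process_use().

def build(bld):
    # Registers the system math library as a fake_lib target so it can be
    # pulled in through 'use' like any locally built library.
    bld.read_shlib('m')

    # apply_vnum() installs libfoo.so.14.15.16 with SONAME libfoo.so.14 and
    # creates the libfoo.so / libfoo.so.14 symlinks.
    bld.shlib(source='a.c', target='foo', vnum='14.15.16')

    # process_use() adds the link flags, include paths, and dependencies of
    # both 'foo' and the system library registered above.
    bld.program(source='main.c', target='app', use='foo m')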
matmutant/sl4a
python/src/Lib/test/test_with.py
53
23715
#!/usr/bin/env python """Unit tests for the with statement specified in PEP 343.""" __author__ = "Mike Bland" __email__ = "mbland at acm dot org" import sys import unittest from collections import deque from contextlib import GeneratorContextManager, contextmanager from test.test_support import run_unittest class MockContextManager(GeneratorContextManager): def __init__(self, gen): GeneratorContextManager.__init__(self, gen) self.enter_called = False self.exit_called = False self.exit_args = None def __enter__(self): self.enter_called = True return GeneratorContextManager.__enter__(self) def __exit__(self, type, value, traceback): self.exit_called = True self.exit_args = (type, value, traceback) return GeneratorContextManager.__exit__(self, type, value, traceback) def mock_contextmanager(func): def helper(*args, **kwds): return MockContextManager(func(*args, **kwds)) return helper class MockResource(object): def __init__(self): self.yielded = False self.stopped = False @mock_contextmanager def mock_contextmanager_generator(): mock = MockResource() try: mock.yielded = True yield mock finally: mock.stopped = True class Nested(object): def __init__(self, *managers): self.managers = managers self.entered = None def __enter__(self): if self.entered is not None: raise RuntimeError("Context is not reentrant") self.entered = deque() vars = [] try: for mgr in self.managers: vars.append(mgr.__enter__()) self.entered.appendleft(mgr) except: if not self.__exit__(*sys.exc_info()): raise return vars def __exit__(self, *exc_info): # Behave like nested with statements # first in, last out # New exceptions override old ones ex = exc_info for mgr in self.entered: try: if mgr.__exit__(*ex): ex = (None, None, None) except: ex = sys.exc_info() self.entered = None if ex is not exc_info: raise ex[0], ex[1], ex[2] class MockNested(Nested): def __init__(self, *managers): Nested.__init__(self, *managers) self.enter_called = False self.exit_called = False self.exit_args = None def __enter__(self): self.enter_called = True return Nested.__enter__(self) def __exit__(self, *exc_info): self.exit_called = True self.exit_args = exc_info return Nested.__exit__(self, *exc_info) class FailureTestCase(unittest.TestCase): def testNameError(self): def fooNotDeclared(): with foo: pass self.assertRaises(NameError, fooNotDeclared) def testEnterAttributeError(self): class LacksEnter(object): def __exit__(self, type, value, traceback): pass def fooLacksEnter(): foo = LacksEnter() with foo: pass self.assertRaises(AttributeError, fooLacksEnter) def testExitAttributeError(self): class LacksExit(object): def __enter__(self): pass def fooLacksExit(): foo = LacksExit() with foo: pass self.assertRaises(AttributeError, fooLacksExit) def assertRaisesSyntaxError(self, codestr): def shouldRaiseSyntaxError(s): compile(s, '', 'single') self.assertRaises(SyntaxError, shouldRaiseSyntaxError, codestr) def testAssignmentToNoneError(self): self.assertRaisesSyntaxError('with mock as None:\n pass') self.assertRaisesSyntaxError( 'with mock as (None):\n' ' pass') def testAssignmentToEmptyTupleError(self): self.assertRaisesSyntaxError( 'with mock as ():\n' ' pass') def testAssignmentToTupleOnlyContainingNoneError(self): self.assertRaisesSyntaxError('with mock as None,:\n pass') self.assertRaisesSyntaxError( 'with mock as (None,):\n' ' pass') def testAssignmentToTupleContainingNoneError(self): self.assertRaisesSyntaxError( 'with mock as (foo, None, bar):\n' ' pass') def testEnterThrows(self): class EnterThrows(object): def __enter__(self): raise 
RuntimeError("Enter threw") def __exit__(self, *args): pass def shouldThrow(): ct = EnterThrows() self.foo = None with ct as self.foo: pass self.assertRaises(RuntimeError, shouldThrow) self.assertEqual(self.foo, None) def testExitThrows(self): class ExitThrows(object): def __enter__(self): return def __exit__(self, *args): raise RuntimeError(42) def shouldThrow(): with ExitThrows(): pass self.assertRaises(RuntimeError, shouldThrow) class ContextmanagerAssertionMixin(object): TEST_EXCEPTION = RuntimeError("test exception") def assertInWithManagerInvariants(self, mock_manager): self.assertTrue(mock_manager.enter_called) self.assertFalse(mock_manager.exit_called) self.assertEqual(mock_manager.exit_args, None) def assertAfterWithManagerInvariants(self, mock_manager, exit_args): self.assertTrue(mock_manager.enter_called) self.assertTrue(mock_manager.exit_called) self.assertEqual(mock_manager.exit_args, exit_args) def assertAfterWithManagerInvariantsNoError(self, mock_manager): self.assertAfterWithManagerInvariants(mock_manager, (None, None, None)) def assertInWithGeneratorInvariants(self, mock_generator): self.assertTrue(mock_generator.yielded) self.assertFalse(mock_generator.stopped) def assertAfterWithGeneratorInvariantsNoError(self, mock_generator): self.assertTrue(mock_generator.yielded) self.assertTrue(mock_generator.stopped) def raiseTestException(self): raise self.TEST_EXCEPTION def assertAfterWithManagerInvariantsWithError(self, mock_manager): self.assertTrue(mock_manager.enter_called) self.assertTrue(mock_manager.exit_called) self.assertEqual(mock_manager.exit_args[0], RuntimeError) self.assertEqual(mock_manager.exit_args[1], self.TEST_EXCEPTION) def assertAfterWithGeneratorInvariantsWithError(self, mock_generator): self.assertTrue(mock_generator.yielded) self.assertTrue(mock_generator.stopped) class NonexceptionalTestCase(unittest.TestCase, ContextmanagerAssertionMixin): def testInlineGeneratorSyntax(self): with mock_contextmanager_generator(): pass def testUnboundGenerator(self): mock = mock_contextmanager_generator() with mock: pass self.assertAfterWithManagerInvariantsNoError(mock) def testInlineGeneratorBoundSyntax(self): with mock_contextmanager_generator() as foo: self.assertInWithGeneratorInvariants(foo) # FIXME: In the future, we'll try to keep the bound names from leaking self.assertAfterWithGeneratorInvariantsNoError(foo) def testInlineGeneratorBoundToExistingVariable(self): foo = None with mock_contextmanager_generator() as foo: self.assertInWithGeneratorInvariants(foo) self.assertAfterWithGeneratorInvariantsNoError(foo) def testInlineGeneratorBoundToDottedVariable(self): with mock_contextmanager_generator() as self.foo: self.assertInWithGeneratorInvariants(self.foo) self.assertAfterWithGeneratorInvariantsNoError(self.foo) def testBoundGenerator(self): mock = mock_contextmanager_generator() with mock as foo: self.assertInWithGeneratorInvariants(foo) self.assertInWithManagerInvariants(mock) self.assertAfterWithGeneratorInvariantsNoError(foo) self.assertAfterWithManagerInvariantsNoError(mock) def testNestedSingleStatements(self): mock_a = mock_contextmanager_generator() with mock_a as foo: mock_b = mock_contextmanager_generator() with mock_b as bar: self.assertInWithManagerInvariants(mock_a) self.assertInWithManagerInvariants(mock_b) self.assertInWithGeneratorInvariants(foo) self.assertInWithGeneratorInvariants(bar) self.assertAfterWithManagerInvariantsNoError(mock_b) self.assertAfterWithGeneratorInvariantsNoError(bar) self.assertInWithManagerInvariants(mock_a) 
self.assertInWithGeneratorInvariants(foo) self.assertAfterWithManagerInvariantsNoError(mock_a) self.assertAfterWithGeneratorInvariantsNoError(foo) class NestedNonexceptionalTestCase(unittest.TestCase, ContextmanagerAssertionMixin): def testSingleArgInlineGeneratorSyntax(self): with Nested(mock_contextmanager_generator()): pass def testSingleArgUnbound(self): mock_contextmanager = mock_contextmanager_generator() mock_nested = MockNested(mock_contextmanager) with mock_nested: self.assertInWithManagerInvariants(mock_contextmanager) self.assertInWithManagerInvariants(mock_nested) self.assertAfterWithManagerInvariantsNoError(mock_contextmanager) self.assertAfterWithManagerInvariantsNoError(mock_nested) def testSingleArgBoundToNonTuple(self): m = mock_contextmanager_generator() # This will bind all the arguments to nested() into a single list # assigned to foo. with Nested(m) as foo: self.assertInWithManagerInvariants(m) self.assertAfterWithManagerInvariantsNoError(m) def testSingleArgBoundToSingleElementParenthesizedList(self): m = mock_contextmanager_generator() # This will bind all the arguments to nested() into a single list # assigned to foo. with Nested(m) as (foo): self.assertInWithManagerInvariants(m) self.assertAfterWithManagerInvariantsNoError(m) def testSingleArgBoundToMultipleElementTupleError(self): def shouldThrowValueError(): with Nested(mock_contextmanager_generator()) as (foo, bar): pass self.assertRaises(ValueError, shouldThrowValueError) def testSingleArgUnbound(self): mock_contextmanager = mock_contextmanager_generator() mock_nested = MockNested(mock_contextmanager) with mock_nested: self.assertInWithManagerInvariants(mock_contextmanager) self.assertInWithManagerInvariants(mock_nested) self.assertAfterWithManagerInvariantsNoError(mock_contextmanager) self.assertAfterWithManagerInvariantsNoError(mock_nested) def testMultipleArgUnbound(self): m = mock_contextmanager_generator() n = mock_contextmanager_generator() o = mock_contextmanager_generator() mock_nested = MockNested(m, n, o) with mock_nested: self.assertInWithManagerInvariants(m) self.assertInWithManagerInvariants(n) self.assertInWithManagerInvariants(o) self.assertInWithManagerInvariants(mock_nested) self.assertAfterWithManagerInvariantsNoError(m) self.assertAfterWithManagerInvariantsNoError(n) self.assertAfterWithManagerInvariantsNoError(o) self.assertAfterWithManagerInvariantsNoError(mock_nested) def testMultipleArgBound(self): mock_nested = MockNested(mock_contextmanager_generator(), mock_contextmanager_generator(), mock_contextmanager_generator()) with mock_nested as (m, n, o): self.assertInWithGeneratorInvariants(m) self.assertInWithGeneratorInvariants(n) self.assertInWithGeneratorInvariants(o) self.assertInWithManagerInvariants(mock_nested) self.assertAfterWithGeneratorInvariantsNoError(m) self.assertAfterWithGeneratorInvariantsNoError(n) self.assertAfterWithGeneratorInvariantsNoError(o) self.assertAfterWithManagerInvariantsNoError(mock_nested) class ExceptionalTestCase(unittest.TestCase, ContextmanagerAssertionMixin): def testSingleResource(self): cm = mock_contextmanager_generator() def shouldThrow(): with cm as self.resource: self.assertInWithManagerInvariants(cm) self.assertInWithGeneratorInvariants(self.resource) self.raiseTestException() self.assertRaises(RuntimeError, shouldThrow) self.assertAfterWithManagerInvariantsWithError(cm) self.assertAfterWithGeneratorInvariantsWithError(self.resource) def testNestedSingleStatements(self): mock_a = mock_contextmanager_generator() mock_b = 
mock_contextmanager_generator() def shouldThrow(): with mock_a as self.foo: with mock_b as self.bar: self.assertInWithManagerInvariants(mock_a) self.assertInWithManagerInvariants(mock_b) self.assertInWithGeneratorInvariants(self.foo) self.assertInWithGeneratorInvariants(self.bar) self.raiseTestException() self.assertRaises(RuntimeError, shouldThrow) self.assertAfterWithManagerInvariantsWithError(mock_a) self.assertAfterWithManagerInvariantsWithError(mock_b) self.assertAfterWithGeneratorInvariantsWithError(self.foo) self.assertAfterWithGeneratorInvariantsWithError(self.bar) def testMultipleResourcesInSingleStatement(self): cm_a = mock_contextmanager_generator() cm_b = mock_contextmanager_generator() mock_nested = MockNested(cm_a, cm_b) def shouldThrow(): with mock_nested as (self.resource_a, self.resource_b): self.assertInWithManagerInvariants(cm_a) self.assertInWithManagerInvariants(cm_b) self.assertInWithManagerInvariants(mock_nested) self.assertInWithGeneratorInvariants(self.resource_a) self.assertInWithGeneratorInvariants(self.resource_b) self.raiseTestException() self.assertRaises(RuntimeError, shouldThrow) self.assertAfterWithManagerInvariantsWithError(cm_a) self.assertAfterWithManagerInvariantsWithError(cm_b) self.assertAfterWithManagerInvariantsWithError(mock_nested) self.assertAfterWithGeneratorInvariantsWithError(self.resource_a) self.assertAfterWithGeneratorInvariantsWithError(self.resource_b) def testNestedExceptionBeforeInnerStatement(self): mock_a = mock_contextmanager_generator() mock_b = mock_contextmanager_generator() self.bar = None def shouldThrow(): with mock_a as self.foo: self.assertInWithManagerInvariants(mock_a) self.assertInWithGeneratorInvariants(self.foo) self.raiseTestException() with mock_b as self.bar: pass self.assertRaises(RuntimeError, shouldThrow) self.assertAfterWithManagerInvariantsWithError(mock_a) self.assertAfterWithGeneratorInvariantsWithError(self.foo) # The inner statement stuff should never have been touched self.assertEqual(self.bar, None) self.assertFalse(mock_b.enter_called) self.assertFalse(mock_b.exit_called) self.assertEqual(mock_b.exit_args, None) def testNestedExceptionAfterInnerStatement(self): mock_a = mock_contextmanager_generator() mock_b = mock_contextmanager_generator() def shouldThrow(): with mock_a as self.foo: with mock_b as self.bar: self.assertInWithManagerInvariants(mock_a) self.assertInWithManagerInvariants(mock_b) self.assertInWithGeneratorInvariants(self.foo) self.assertInWithGeneratorInvariants(self.bar) self.raiseTestException() self.assertRaises(RuntimeError, shouldThrow) self.assertAfterWithManagerInvariantsWithError(mock_a) self.assertAfterWithManagerInvariantsNoError(mock_b) self.assertAfterWithGeneratorInvariantsWithError(self.foo) self.assertAfterWithGeneratorInvariantsNoError(self.bar) def testRaisedStopIteration1(self): # From bug 1462485 @contextmanager def cm(): yield def shouldThrow(): with cm(): raise StopIteration("from with") self.assertRaises(StopIteration, shouldThrow) def testRaisedStopIteration2(self): # From bug 1462485 class cm(object): def __enter__(self): pass def __exit__(self, type, value, traceback): pass def shouldThrow(): with cm(): raise StopIteration("from with") self.assertRaises(StopIteration, shouldThrow) def testRaisedStopIteration3(self): # Another variant where the exception hasn't been instantiated # From bug 1705170 @contextmanager def cm(): yield def shouldThrow(): with cm(): raise iter([]).next() self.assertRaises(StopIteration, shouldThrow) def testRaisedGeneratorExit1(self): # From 
bug 1462485 @contextmanager def cm(): yield def shouldThrow(): with cm(): raise GeneratorExit("from with") self.assertRaises(GeneratorExit, shouldThrow) def testRaisedGeneratorExit2(self): # From bug 1462485 class cm (object): def __enter__(self): pass def __exit__(self, type, value, traceback): pass def shouldThrow(): with cm(): raise GeneratorExit("from with") self.assertRaises(GeneratorExit, shouldThrow) def testErrorsInBool(self): # issue4589: __exit__ return code may raise an exception # when looking at its truth value. class cm(object): def __init__(self, bool_conversion): class Bool: def __nonzero__(self): return bool_conversion() self.exit_result = Bool() def __enter__(self): return 3 def __exit__(self, a, b, c): return self.exit_result def trueAsBool(): with cm(lambda: True): self.fail("Should NOT see this") trueAsBool() def falseAsBool(): with cm(lambda: False): self.fail("Should raise") self.assertRaises(AssertionError, falseAsBool) def failAsBool(): with cm(lambda: 1//0): self.fail("Should NOT see this") self.assertRaises(ZeroDivisionError, failAsBool) class NonLocalFlowControlTestCase(unittest.TestCase): def testWithBreak(self): counter = 0 while True: counter += 1 with mock_contextmanager_generator(): counter += 10 break counter += 100 # Not reached self.assertEqual(counter, 11) def testWithContinue(self): counter = 0 while True: counter += 1 if counter > 2: break with mock_contextmanager_generator(): counter += 10 continue counter += 100 # Not reached self.assertEqual(counter, 12) def testWithReturn(self): def foo(): counter = 0 while True: counter += 1 with mock_contextmanager_generator(): counter += 10 return counter counter += 100 # Not reached self.assertEqual(foo(), 11) def testWithYield(self): def gen(): with mock_contextmanager_generator(): yield 12 yield 13 x = list(gen()) self.assertEqual(x, [12, 13]) def testWithRaise(self): counter = 0 try: counter += 1 with mock_contextmanager_generator(): counter += 10 raise RuntimeError counter += 100 # Not reached except RuntimeError: self.assertEqual(counter, 11) else: self.fail("Didn't raise RuntimeError") class AssignmentTargetTestCase(unittest.TestCase): def testSingleComplexTarget(self): targets = {1: [0, 1, 2]} with mock_contextmanager_generator() as targets[1][0]: self.assertEqual(targets.keys(), [1]) self.assertEqual(targets[1][0].__class__, MockResource) with mock_contextmanager_generator() as targets.values()[0][1]: self.assertEqual(targets.keys(), [1]) self.assertEqual(targets[1][1].__class__, MockResource) with mock_contextmanager_generator() as targets[2]: keys = targets.keys() keys.sort() self.assertEqual(keys, [1, 2]) class C: pass blah = C() with mock_contextmanager_generator() as blah.foo: self.assertEqual(hasattr(blah, "foo"), True) def testMultipleComplexTargets(self): class C: def __enter__(self): return 1, 2, 3 def __exit__(self, t, v, tb): pass targets = {1: [0, 1, 2]} with C() as (targets[1][0], targets[1][1], targets[1][2]): self.assertEqual(targets, {1: [1, 2, 3]}) with C() as (targets.values()[0][2], targets.values()[0][1], targets.values()[0][0]): self.assertEqual(targets, {1: [3, 2, 1]}) with C() as (targets[1], targets[2], targets[3]): self.assertEqual(targets, {1: 1, 2: 2, 3: 3}) class B: pass blah = B() with C() as (blah.one, blah.two, blah.three): self.assertEqual(blah.one, 1) self.assertEqual(blah.two, 2) self.assertEqual(blah.three, 3) class ExitSwallowsExceptionTestCase(unittest.TestCase): def testExitTrueSwallowsException(self): class AfricanSwallow: def __enter__(self): pass def 
__exit__(self, t, v, tb): return True try: with AfricanSwallow(): 1/0 except ZeroDivisionError: self.fail("ZeroDivisionError should have been swallowed") def testExitFalseDoesntSwallowException(self): class EuropeanSwallow: def __enter__(self): pass def __exit__(self, t, v, tb): return False try: with EuropeanSwallow(): 1/0 except ZeroDivisionError: pass else: self.fail("ZeroDivisionError should have been raised") def test_main(): run_unittest(FailureTestCase, NonexceptionalTestCase, NestedNonexceptionalTestCase, ExceptionalTestCase, NonLocalFlowControlTestCase, AssignmentTargetTestCase, ExitSwallowsExceptionTestCase) if __name__ == '__main__': test_main()
apache-2.0
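The tests above all hinge on one equivalence: a with statement is, roughly, syntactic sugar for the __enter__/__exit__ protocol below. This is a simplified sketch of the PEP 343 expansion, not the exact bytecode semantics.

import sys

class Managed(object):
    def __enter__(self):
        return 'resource'

    def __exit__(self, type, value, traceback):
        return False  # a false return value re-raises any exception

# `with Managed() as r: <body>` expands approximately to:
mgr = Managed()
r = mgr.__enter__()
try:
    print(r)  # <body> runs here with the bound name
except:
    # __exit__ sees the exception and may swallow it by returning True
    if not mgr.__exit__(*sys.exc_info()):
        raise
else:
    mgr.__exit__(None, None, None)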
shawnlinq/SlqBlog2
accounts/permissions.py
1
1727
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from flask import current_app
from flask_principal import Permission, RoleNeed, UserNeed, identity_loaded
from flask_login import current_user

# admin_need = RoleNeed('admin')
# editor_need = RoleNeed('editor')
# writer_need = RoleNeed('writer')
# reader_need = RoleNeed('reader')

su_need = RoleNeed('su')

su_permission = Permission(su_need)
admin_permission = Permission(RoleNeed('admin')).union(su_permission)
editor_permission = Permission(RoleNeed('editor')).union(admin_permission)
writer_permission = Permission(RoleNeed('writer')).union(editor_permission)
reader_permission = Permission(RoleNeed('reader')).union(writer_permission)


@identity_loaded.connect
# Both this and the following work:
# @identity_loaded.connect_via(current_app)
def on_identity_loaded(sender, identity):
    # Set the identity user object
    identity.user = current_user

    # Add the UserNeed to the identity
    if hasattr(current_user, 'username'):
        identity.provides.add(UserNeed(current_user.username))

    # Assuming the User model has a list of roles, update the
    # identity with the roles that the user provides
    if hasattr(current_user, 'role'):
        # for role in current_user.roles:
        identity.provides.add(RoleNeed(current_user.role))

    # if current_user.is_superuser:
    if hasattr(current_user, 'is_superuser') and current_user.is_superuser:
        identity.provides.add(su_need)

    # return current_user.role
    identity.allow_edit = editor_permission.allows(identity)
    identity.allow_admin = admin_permission.allows(identity)
    identity.allow_write = writer_permission.allows(identity)
gpl-2.0
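A sketch of how the permission lattice above is typically consumed, assuming a Flask app with Flask-Principal already wired up; the routes and return values are illustrative.

from flask import Flask, abort
from accounts.permissions import admin_permission, writer_permission

app = Flask(__name__)

@app.route('/admin')
@admin_permission.require(http_exception=403)  # the union chain lets 'su' through too
def admin_dashboard():
    return 'admin area'

@app.route('/posts/new')
def new_post():
    # Imperative check: any role at or above 'writer' in the union chain passes.
    if not writer_permission.can():
        abort(403)
    return 'post editor'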
bmanojlovic/ansible
lib/ansible/modules/network/basics/uri.py
7
17956
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2013, Romeo Theriault <romeot () hawaii.edu> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # # see examples/playbooks/uri.yml ANSIBLE_METADATA = {'status': ['stableinterface'], 'supported_by': 'core', 'version': '1.0'} DOCUMENTATION = ''' --- module: uri short_description: Interacts with webservices description: - Interacts with HTTP and HTTPS web services and supports Digest, Basic and WSSE HTTP authentication mechanisms. version_added: "1.1" options: url: description: - HTTP or HTTPS URL in the form (http|https)://host.domain[:port]/path required: true default: null dest: description: - path of where to download the file to (if desired). If I(dest) is a directory, the basename of the file on the remote server will be used. required: false default: null user: description: - username for the module to use for Digest, Basic or WSSE authentication. required: false default: null password: description: - password for the module to use for Digest, Basic or WSSE authentication. required: false default: null body: description: - The body of the http request/response to the web service. If C(body_format) is set to 'json' it will take an already formatted JSON string or convert a data structure into JSON. required: false default: null body_format: description: - The serialization format of the body. When set to json, encodes the body argument, if needed, and automatically sets the Content-Type header accordingly. As of C(2.3) it is possible to override the `Content-Type` header, when set to json via the I(headers) option. required: false choices: [ "raw", "json" ] default: raw version_added: "2.0" method: description: - The HTTP method of the request or response. It MUST be uppercase. required: false choices: [ "GET", "POST", "PUT", "HEAD", "DELETE", "OPTIONS", "PATCH", "TRACE", "CONNECT", "REFRESH" ] default: "GET" return_content: description: - Whether or not to return the body of the request as a "content" key in the dictionary result. If the reported Content-type is "application/json", then the JSON is additionally loaded into a key called C(json) in the dictionary results. required: false choices: [ "yes", "no" ] default: "no" force_basic_auth: description: - The library used by the uri module only sends authentication information when a webservice responds to an initial request with a 401 status. Since some basic auth services do not properly send a 401, logins will fail. This option forces the sending of the Basic authentication header upon initial request. required: false choices: [ "yes", "no" ] default: "no" follow_redirects: description: - Whether or not the URI module should follow redirects. C(all) will follow all redirects. C(safe) will follow only "safe" redirects, where "safe" means that the client is only doing a GET or HEAD on the URI to which it is being redirected. C(none) will not follow any redirects. 
Note that C(yes) and C(no) choices are accepted for backwards compatibility, where C(yes) is the equivalent of C(all) and C(no) is the equivalent of C(safe). C(yes) and C(no) are deprecated and will be removed in some future version of Ansible. required: false choices: [ "all", "safe", "none" ] default: "safe" creates: description: - a filename, when it already exists, this step will not be run. required: false removes: description: - a filename, when it does not exist, this step will not be run. required: false status_code: description: - A valid, numeric, HTTP status code that signifies success of the request. Can also be comma separated list of status codes. required: false default: 200 timeout: description: - The socket level timeout in seconds required: false default: 30 HEADER_: description: - Any parameter starting with "HEADER_" is a sent with your request as a header. For example, HEADER_Content-Type="application/json" would send the header "Content-Type" along with your request with a value of "application/json". This option is deprecated as of C(2.1) and may be removed in a future release. Use I(headers) instead. required: false default: null headers: description: - Add custom HTTP headers to a request in the format of a YAML hash. As of C(2.3) supplying C(Content-Type) here will override the header generated by supplying C(json) for I(body_format). required: false default: null version_added: '2.1' others: description: - all arguments accepted by the M(file) module also work here required: false validate_certs: description: - If C(no), SSL certificates will not be validated. This should only set to C(no) used on personally controlled sites using self-signed certificates. Prior to 1.9.2 the code defaulted to C(no). required: false default: 'yes' choices: ['yes', 'no'] version_added: '1.9.2' notes: - The dependency on httplib2 was removed in Ansible 2.1 author: "Romeo Theriault (@romeotheriault)" ''' EXAMPLES = ''' - name: Check that you can connect (GET) to a page and it returns a status 200 uri: url: http://www.example.com # Check that a page returns a status 200 and fail if the word AWESOME is not # in the page contents. 
- uri: url: http://www.example.com return_content: yes register: webpage - name: Fail if AWESOME is not in the page content fail: when: "'AWESOME' not in webpage.content" - name: Create a JIRA issue uri: url: https://your.jira.example.com/rest/api/2/issue/ method: POST user: your_username password: your_pass body: "{{ lookup('file','issue.json') }}" force_basic_auth: yes status_code: 201 body_format: json # Login to a form based webpage, then use the returned cookie to # access the app in later tasks - uri: url: https://your.form.based.auth.example.com/index.php method: POST body: "name=your_username&password=your_password&enter=Sign%20in" status_code: 302 HEADER_Content-Type: "application/x-www-form-urlencoded" register: login - uri: url: https://your.form.based.auth.example.com/dashboard.php method: GET return_content: yes HEADER_Cookie: "{{login.set_cookie}}" - name: Queue build of a project in Jenkins uri: url: "http://{{ jenkins.host }}/job/{{ jenkins.job }}/build?token={{ jenkins.token }}" method: GET user: "{{ jenkins.user }}" password: "{{ jenkins.password }}" force_basic_auth: yes status_code: 201 ''' import cgi import datetime import os import shutil import tempfile try: import json except ImportError: import simplejson as json from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.pycompat24 import get_exception import ansible.module_utils.six as six from ansible.module_utils._text import to_text from ansible.module_utils.urls import fetch_url, url_argument_spec def write_file(module, url, dest, content): # create a tempfile with some test content fd, tmpsrc = tempfile.mkstemp() f = open(tmpsrc, 'wb') try: f.write(content) except Exception: err = get_exception() os.remove(tmpsrc) module.fail_json(msg="failed to create temporary content file: %s" % str(err)) f.close() checksum_src = None checksum_dest = None # raise an error if there is no tmpsrc file if not os.path.exists(tmpsrc): os.remove(tmpsrc) module.fail_json(msg="Source %s does not exist" % (tmpsrc)) if not os.access(tmpsrc, os.R_OK): os.remove(tmpsrc) module.fail_json( msg="Source %s not readable" % (tmpsrc)) checksum_src = module.sha1(tmpsrc) # check if there is no dest file if os.path.exists(dest): # raise an error if copy has no permission on dest if not os.access(dest, os.W_OK): os.remove(tmpsrc) module.fail_json(msg="Destination %s not writable" % (dest)) if not os.access(dest, os.R_OK): os.remove(tmpsrc) module.fail_json(msg="Destination %s not readable" % (dest)) checksum_dest = module.sha1(dest) else: if not os.access(os.path.dirname(dest), os.W_OK): os.remove(tmpsrc) module.fail_json(msg="Destination dir %s not writable" % (os.path.dirname(dest))) if checksum_src != checksum_dest: try: shutil.copyfile(tmpsrc, dest) except Exception: err = get_exception() os.remove(tmpsrc) module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, str(err))) os.remove(tmpsrc) def url_filename(url): fn = os.path.basename(six.moves.urllib.parse.urlsplit(url)[2]) if fn == '': return 'index.html' return fn def absolute_location(url, location): """Attempts to create an absolute URL based on initial URL, and next URL, specifically in the case of a ``Location`` header. 
""" if '://' in location: return location elif location.startswith('/'): parts = six.moves.urllib.parse.urlsplit(url) base = url.replace(parts[2], '') return '%s%s' % (base, location) elif not location.startswith('/'): base = os.path.dirname(url) return '%s/%s' % (base, location) else: return location def uri(module, url, dest, body, body_format, method, headers, socket_timeout): # is dest is set and is a directory, let's check if we get redirected and # set the filename from that url redirected = False redir_info = {} r = {} if dest is not None: # Stash follow_redirects, in this block we don't want to follow # we'll reset back to the supplied value soon follow_redirects = module.params['follow_redirects'] module.params['follow_redirects'] = False dest = os.path.expanduser(dest) if os.path.isdir(dest): # first check if we are redirected to a file download _, redir_info = fetch_url(module, url, data=body, headers=headers, method=method, timeout=socket_timeout) # if we are redirected, update the url with the location header, # and update dest with the new url filename if redir_info['status'] in (301, 302, 303, 307): url = redir_info['location'] redirected = True dest = os.path.join(dest, url_filename(url)) # if destination file already exist, only download if file newer if os.path.exists(dest): t = datetime.datetime.utcfromtimestamp(os.path.getmtime(dest)) tstamp = t.strftime('%a, %d %b %Y %H:%M:%S +0000') headers['If-Modified-Since'] = tstamp # Reset follow_redirects back to the stashed value module.params['follow_redirects'] = follow_redirects resp, info = fetch_url(module, url, data=body, headers=headers, method=method, timeout=socket_timeout) try: content = resp.read() except AttributeError: # there was no content, but the error read() # may have been stored in the info as 'body' content = info.pop('body', '') r['redirected'] = redirected or info['url'] != url r.update(redir_info) r.update(info) return r, content, dest def main(): argument_spec = url_argument_spec() argument_spec.update(dict( dest = dict(required=False, default=None, type='path'), url_username = dict(required=False, default=None, aliases=['user']), url_password = dict(required=False, default=None, aliases=['password'], no_log=True), body = dict(required=False, default=None, type='raw'), body_format = dict(required=False, default='raw', choices=['raw', 'json']), method = dict(required=False, default='GET', choices=['GET', 'POST', 'PUT', 'HEAD', 'DELETE', 'OPTIONS', 'PATCH', 'TRACE', 'CONNECT', 'REFRESH']), return_content = dict(required=False, default='no', type='bool'), follow_redirects = dict(required=False, default='safe', choices=['all', 'safe', 'none', 'yes', 'no']), creates = dict(required=False, default=None, type='path'), removes = dict(required=False, default=None, type='path'), status_code = dict(required=False, default=[200], type='list'), timeout = dict(required=False, default=30, type='int'), headers = dict(required=False, type='dict', default={}) )) module = AnsibleModule( argument_spec=argument_spec, check_invalid_arguments=False, add_file_common_args=True ) url = module.params['url'] body = module.params['body'] body_format = module.params['body_format'].lower() method = module.params['method'] dest = module.params['dest'] return_content = module.params['return_content'] creates = module.params['creates'] removes = module.params['removes'] status_code = [int(x) for x in list(module.params['status_code'])] socket_timeout = module.params['timeout'] dict_headers = module.params['headers'] if body_format == 
'json': # Encode the body unless it's a string, in which case assume it is pre-formatted JSON if not isinstance(body, six.string_types): body = json.dumps(body) lower_header_keys = [key.lower() for key in dict_headers] if 'content-type' not in lower_header_keys: dict_headers['Content-Type'] = 'application/json' # Grab all the http headers. Need this hack since passing multi-values is # currently a bit ugly. (e.g. headers='{"Content-Type":"application/json"}') for key, value in six.iteritems(module.params): if key.startswith("HEADER_"): skey = key.replace("HEADER_", "") dict_headers[skey] = value if creates is not None: # do not run the command if creates=filename is given # and the file already exists. This allows idempotence # of uri executions. if os.path.exists(creates): module.exit_json(stdout="skipped, since %s exists" % creates, changed=False, stderr=False, rc=0) if removes is not None: # do not run the command if removes=filename is given # and the file does not exist. This allows idempotence # of uri executions. if not os.path.exists(removes): module.exit_json(stdout="skipped, since %s does not exist" % removes, changed=False, stderr=False, rc=0) # Make the request resp, content, dest = uri(module, url, dest, body, body_format, method, dict_headers, socket_timeout) resp['status'] = int(resp['status']) # Write the file out if requested if dest is not None: if resp['status'] == 304: changed = False else: write_file(module, url, dest, content) # allow file attribute changes changed = True module.params['path'] = dest file_args = module.load_file_common_arguments(module.params) file_args['path'] = dest changed = module.set_fs_attributes_if_different(file_args, changed) resp['path'] = dest else: changed = False # Transmogrify the headers, replacing '-' with '_', since variable names don't # work with dashes. # In python3, the headers are title cased. Lowercase them to be # compatible with the python2 behaviour. uresp = {} for key, value in six.iteritems(resp): ukey = key.replace("-", "_").lower() uresp[ukey] = value try: uresp['location'] = absolute_location(url, uresp['location']) except KeyError: pass # Default content_encoding to try content_encoding = 'utf-8' if 'content_type' in uresp: content_type, params = cgi.parse_header(uresp['content_type']) if 'charset' in params: content_encoding = params['charset'] u_content = to_text(content, encoding=content_encoding) if 'application/json' in content_type or 'text/json' in content_type: try: js = json.loads(u_content) uresp['json'] = js except ValueError: pass else: u_content = to_text(content, encoding=content_encoding) if resp['status'] not in status_code: uresp['msg'] = 'Status code was not %s: %s' % (status_code, uresp.get('msg', '')) module.fail_json(content=u_content, **uresp) elif return_content: module.exit_json(changed=changed, content=u_content, **uresp) else: module.exit_json(changed=changed, **uresp) if __name__ == '__main__': main()
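# --- Illustrative sketch (not part of the module) ----------------------------
# Shows the two small response transformations main() relies on: resolving a
# relative Location header via absolute_location(), and the header-key
# "transmogrification" (dash -> underscore, lowercased). The URLs and header
# values below are made-up examples.
def _demo_response_handling():
    # a relative Location rooted at '/' replaces the path entirely
    assert absolute_location('http://example.com/a/b', '/login') == 'http://example.com/login'
    # a bare relative Location is joined onto the current URL's directory
    assert absolute_location('http://example.com/a/b', 'next') == 'http://example.com/a/next'
    headers = {'Content-Type': 'text/html', 'Set-Cookie': 'sid=1'}
    # same transformation main() applies so header names work as variables
    transmogrified = dict((k.replace('-', '_').lower(), v)
                          for k, v in headers.items())
    assert transmogrified == {'content_type': 'text/html', 'set_cookie': 'sid=1'}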
gpl-3.0
avaitla/Haskell-to-C---Bridge
pygccxml-1.0.0/unittests/copy_constructor_tester.py
1
1665
# Copyright 2004-2008 Roman Yakovenko. # Distributed under the Boost Software License, Version 1.0. (See # accompanying file LICENSE_1_0.txt or copy at # http://www.boost.org/LICENSE_1_0.txt) import os import unittest import autoconfig import parser_test_case from pygccxml import utils from pygccxml import parser from pygccxml import declarations class tester_t( parser_test_case.parser_test_case_t ): def __init__(self, *args ): parser_test_case.parser_test_case_t.__init__( self, *args ) self.global_ns = None def setUp(self): if not self.global_ns: xml_file = os.path.join( autoconfig.data_directory, 'ogre.1.7.xml' ) reader = parser.source_reader_t( autoconfig.cxx_parsers_cfg.gccxml ) self.global_ns = declarations.get_global_namespace( reader.read_xml_file(xml_file) ) self.global_ns.init_optimizer() def test( self ): for x in self.global_ns.typedefs( 'SettingsMultiMap' ): self.failUnless( not declarations.is_noncopyable( x ) ) for x in self.global_ns.typedefs( 'SettingsIterator' ): self.failUnless( not declarations.is_noncopyable( x ) ) for x in self.global_ns.typedefs( 'SectionIterator' ): self.failUnless( not declarations.is_noncopyable( x ) ) def create_suite(): suite = unittest.TestSuite() suite.addTest( unittest.makeSuite(tester_t)) return suite def run_suite(): unittest.TextTestRunner(verbosity=2).run( create_suite() ) if __name__ == "__main__": run_suite()
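# --- Illustrative sketch (not part of the test) -------------------------------
# The query pattern the test exercises, written out standalone: given a parsed
# global namespace, ask whether a declaration lacks an accessible copy
# constructor. The class name 'MyClass' is a hypothetical placeholder.
def _demo_noncopyable_query(global_ns):
    cls = global_ns.class_('MyClass')  # hypothetical class name
    if declarations.is_noncopyable(cls):
        print('MyClass has no accessible copy constructor')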
bsd-3-clause
tgroh/incubator-beam
sdks/python/apache_beam/utils/proto_utils.py
9
1963
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """For internal use only; no backwards-compatibility guarantees.""" from google.protobuf import any_pb2 from google.protobuf import struct_pb2 def pack_Any(msg): """Creates a protobuf Any with msg as its content. Returns None if msg is None. """ if msg is None: return None result = any_pb2.Any() result.Pack(msg) return result def unpack_Any(any_msg, msg_class): """Unpacks any_msg into msg_class. Returns None if msg_class is None. """ if msg_class is None: return None msg = msg_class() any_msg.Unpack(msg) return msg def parse_Bytes(bytes, msg_class): """Parses the string of bytes into msg_class. Returns the input bytes if msg_class is None.""" if msg_class is None: return bytes msg = msg_class() msg.ParseFromString(bytes) return msg def pack_Struct(**kwargs): """Returns a struct containing the values indicated by kwargs. """ msg = struct_pb2.Struct() for key, value in kwargs.items(): msg[key] = value # pylint: disable=unsubscriptable-object, unsupported-assignment-operation return msg def from_micros(cls, micros): """Returns an instance of cls initialized from a timestamp in microseconds.""" result = cls() result.FromMicroseconds(micros) return result
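# --- Illustrative usage sketch (not part of the module) -----------------------
# Round-trips a message through pack_Any/unpack_Any/parse_Bytes and builds a
# Struct; struct_pb2.Value serves purely as a convenient example message type.
def _demo_proto_utils():
    value = struct_pb2.Value(string_value='hello')
    packed = pack_Any(value)                      # google.protobuf.Any wrapper
    restored = unpack_Any(packed, struct_pb2.Value)
    assert restored.string_value == 'hello'
    # parse_Bytes is the inverse of SerializeToString for a known class
    assert parse_Bytes(value.SerializeToString(),
                       struct_pb2.Value).string_value == 'hello'
    options = pack_Struct(runner='DirectRunner', streaming=False)
    assert options['runner'] == 'DirectRunner'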
apache-2.0
atlashealth/ansible-modules-extras
monitoring/nagios.py
2
30634
#!/usr/bin/python # -*- coding: utf-8 -*- # # This file is largely copied from the Nagios module included in the # Func project. Original copyright follows: # # func-nagios - Schedule downtime and enable/disable notifications # Copyright 2011, Red Hat, Inc. # Tim Bielawa <tbielawa@redhat.com> # # This software may be freely redistributed under the terms of the GNU # general public license version 2 or any later version. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: nagios short_description: Perform common tasks in Nagios related to downtime and notifications. description: - "The M(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts." - All actions require the I(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) variable to refer to the host the playbook is currently running on. - You can specify multiple services at once by separating them with commas, e.g., C(services=httpd,nfs,puppet). - When specifying what service to handle there is a special service value, I(host), which will handle alerts/downtime for the I(host itself), e.g., C(service=host). This keyword may not be given with other services at the same time. I(Setting alerts/downtime for a host does not affect alerts/downtime for any of the services running on it.) To schedule downtime for all services on a particular host, use the keyword "all", e.g., C(service=all). - When using the M(nagios) module you will need to specify your Nagios server using the C(delegate_to) parameter. version_added: "0.7" options: action: description: - Action to take. required: true default: null choices: [ "downtime", "enable_alerts", "disable_alerts", "silence", "unsilence", "silence_nagios", "unsilence_nagios", "command" ] host: description: - Host to operate on in Nagios. required: false default: null cmdfile: description: - Path to the nagios I(command file) (FIFO pipe). Only required if auto-detection fails. required: false default: auto-detected author: description: - Author to leave downtime comments as. Only usable with the C(downtime) action. required: false default: Ansible minutes: description: - Minutes to schedule downtime for. - Only usable with the C(downtime) action. required: false default: 30 services: description: - What to manage downtime/alerts for. Separate multiple services with commas. C(service) is an alias for C(services). B(Required) option when using the C(downtime), C(enable_alerts), and C(disable_alerts) actions. aliases: [ "service" ] required: true default: null command: description: - The raw command to send to nagios, which should not include the submitted time header or the line-feed. B(Required) option when using the C(command) action.
required: true default: null author: '"Tim Bielawa (@tbielawa)" <tbielawa@redhat.com>' requirements: [ "Nagios" ] ''' EXAMPLES = ''' # set 30 minutes of apache downtime - nagios: action=downtime minutes=30 service=httpd host={{ inventory_hostname }} # schedule an hour of HOST downtime - nagios: action=downtime minutes=60 service=host host={{ inventory_hostname }} # schedule downtime for ALL services on HOST - nagios: action=downtime minutes=45 service=all host={{ inventory_hostname }} # schedule downtime for a few services - nagios: action=downtime services=frob,foobar,qeuz host={{ inventory_hostname }} # enable SMART disk alerts - nagios: action=enable_alerts service=smart host={{ inventory_hostname }} # "two services at once: disable httpd and nfs alerts" - nagios: action=disable_alerts service=httpd,nfs host={{ inventory_hostname }} # disable HOST alerts - nagios: action=disable_alerts service=host host={{ inventory_hostname }} # silence ALL alerts - nagios: action=silence host={{ inventory_hostname }} # unsilence all alerts - nagios: action=unsilence host={{ inventory_hostname }} # SHUT UP NAGIOS - nagios: action=silence_nagios # ANNOY ME NAGIOS - nagios: action=unsilence_nagios # command something - nagios: action=command command='DISABLE_FAILURE_PREDICTION' ''' import time import os.path ###################################################################### def which_cmdfile(): locations = [ # rhel '/etc/nagios/nagios.cfg', # debian '/etc/nagios3/nagios.cfg', # older debian '/etc/nagios2/nagios.cfg', # bsd, solaris '/usr/local/etc/nagios/nagios.cfg', # groundwork it monitoring '/usr/local/groundwork/nagios/etc/nagios.cfg', # open monitoring distribution '/omd/sites/oppy/tmp/nagios/nagios.cfg', # common from-source install locations '/usr/local/nagios/etc/nagios.cfg', '/usr/local/nagios/nagios.cfg', '/opt/nagios/etc/nagios.cfg', '/opt/nagios/nagios.cfg', # icinga on debian/ubuntu '/etc/icinga/icinga.cfg', # icinga installed from source (default location) '/usr/local/icinga/etc/icinga.cfg', ] for path in locations: if os.path.exists(path): for line in open(path): if line.startswith('command_file'): return line.split('=')[1].strip() return None ###################################################################### def main(): ACTION_CHOICES = [ 'downtime', 'silence', 'unsilence', 'enable_alerts', 'disable_alerts', 'silence_nagios', 'unsilence_nagios', 'command', ] module = AnsibleModule( argument_spec=dict( action=dict(required=True, choices=ACTION_CHOICES), author=dict(default='Ansible'), host=dict(required=False, default=None), minutes=dict(default=30), cmdfile=dict(default=which_cmdfile()), services=dict(default=None, aliases=['service']), command=dict(required=False, default=None), ) ) action = module.params['action'] host = module.params['host'] minutes = module.params['minutes'] services = module.params['services'] cmdfile = module.params['cmdfile'] command = module.params['command'] ################################################################## # Required args per action: # downtime = (minutes, service, host) # (un)silence = (host) # (enable/disable)_alerts = (service, host) # command = command # # AnsibleModule will verify most stuff; we need to verify # 'minutes' and 'service' manually.
################################################################## if action not in ['command', 'silence_nagios', 'unsilence_nagios']: if not host: module.fail_json(msg='no host specified for action requiring one') ###################################################################### if action == 'downtime': # Make sure there's an actual service selected if not services: module.fail_json(msg='no service selected to set downtime for') # Make sure minutes is a number try: int(minutes) except (TypeError, ValueError): module.fail_json(msg='invalid entry for minutes; must be a number') ################################################################## if action in ['enable_alerts', 'disable_alerts']: if not services: module.fail_json(msg='a service is required when setting alerts') if action in ['command']: if not command: module.fail_json(msg='no command passed for command action') ################################################################## if not cmdfile: module.fail_json(msg='unable to locate nagios.cfg') ################################################################## ansible_nagios = Nagios(module, **module.params) if module.check_mode: module.exit_json(changed=True) else: ansible_nagios.act() ################################################################## ###################################################################### class Nagios(object): """ Perform common tasks in Nagios related to downtime and notifications. The complete set of external commands Nagios handles is documented on their website: http://old.nagios.org/developerinfo/externalcommands/commandlist.php Note that in the case of `schedule_svc_downtime`, `enable_svc_notifications`, and `disable_svc_notifications`, the service argument should be passed as a list. """ def __init__(self, module, **kwargs): self.module = module self.action = kwargs['action'] self.author = kwargs['author'] self.host = kwargs['host'] self.minutes = int(kwargs['minutes']) self.cmdfile = kwargs['cmdfile'] self.command = kwargs['command'] if (kwargs['services'] is None) or (kwargs['services'] == 'host') or (kwargs['services'] == 'all'): self.services = kwargs['services'] else: self.services = kwargs['services'].split(',') self.command_results = [] def _now(self): """ The time in seconds since 12:00:00AM Jan 1, 1970 """ return int(time.time()) def _write_command(self, cmd): """ Write the given command to the Nagios command file """ try: fp = open(self.cmdfile, 'w') fp.write(cmd) fp.flush() fp.close() self.command_results.append(cmd.strip()) # return True so callers can verify the write succeeded return True except IOError: self.module.fail_json(msg='unable to write to nagios command file', cmdfile=self.cmdfile) def _fmt_dt_str(self, cmd, host, duration, author=None, comment="Scheduling downtime", start=None, svc=None, fixed=1, trigger=0): """ Format an external-command downtime string. cmd - Nagios command ID host - Host to schedule downtime on duration - Minutes to schedule downtime for author - Name to file the downtime as comment - Reason for running this command (upgrade, reboot, etc) start - Start of downtime in seconds since 12:00AM Jan 1 1970 Default is to use the entry time (now) svc - Service to schedule downtime for, omit when for host downtime fixed - Start now if 1, start when a problem is detected if 0 trigger - Optional ID of event to start downtime from. Leave as 0 for fixed downtime.
Syntax: [submitted] COMMAND;<host_name>;[<service_description>] <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>; <comment> """ entry_time = self._now() if start is None: start = entry_time hdr = "[%s] %s;%s;" % (entry_time, cmd, host) duration_s = (duration * 60) end = start + duration_s if not author: author = self.author if svc is not None: dt_args = [svc, str(start), str(end), str(fixed), str(trigger), str(duration_s), author, comment] else: # Downtime for a host if no svc specified dt_args = [str(start), str(end), str(fixed), str(trigger), str(duration_s), author, comment] dt_arg_str = ";".join(dt_args) dt_str = hdr + dt_arg_str + "\n" return dt_str def _fmt_notif_str(self, cmd, host=None, svc=None): """ Format an external-command notification string. cmd - Nagios command ID. host - Host to en/disable notifications on. A value is not required for global commands. svc - Service to schedule downtime for. A value is not required for host downtime. Syntax: [submitted] COMMAND;<host_name>[;<service_description>] """ entry_time = self._now() notif_str = "[%s] %s" % (entry_time, cmd) if host is not None: notif_str += ";%s" % host if svc is not None: notif_str += ";%s" % svc notif_str += "\n" return notif_str def schedule_svc_downtime(self, host, services=None, minutes=30): """ This command is used to schedule downtime for a particular service. During the specified downtime, Nagios will not send notifications out about the service. Syntax: SCHEDULE_SVC_DOWNTIME;<host_name>;<service_description> <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>; <comment> """ cmd = "SCHEDULE_SVC_DOWNTIME" if services is None: services = [] for service in services: dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, svc=service) self._write_command(dt_cmd_str) def schedule_host_downtime(self, host, minutes=30): """ This command is used to schedule downtime for a particular host. During the specified downtime, Nagios will not send notifications out about the host. Syntax: SCHEDULE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>; <fixed>;<trigger_id>;<duration>;<author>;<comment> """ cmd = "SCHEDULE_HOST_DOWNTIME" dt_cmd_str = self._fmt_dt_str(cmd, host, minutes) self._write_command(dt_cmd_str) def schedule_host_svc_downtime(self, host, minutes=30): """ This command is used to schedule downtime for all services associated with a particular host. During the specified downtime, Nagios will not send notifications out about the host. Syntax: SCHEDULE_HOST_SVC_DOWNTIME;<host_name>;<start_time>;<end_time>; <fixed>;<trigger_id>;<duration>;<author>;<comment> """ cmd = "SCHEDULE_HOST_SVC_DOWNTIME" dt_cmd_str = self._fmt_dt_str(cmd, host, minutes) self._write_command(dt_cmd_str) def schedule_hostgroup_host_downtime(self, hostgroup, minutes=30): """ This command is used to schedule downtime for all hosts in a particular hostgroup. During the specified downtime, Nagios will not send notifications out about the hosts. Syntax: SCHEDULE_HOSTGROUP_HOST_DOWNTIME;<hostgroup_name>;<start_time>; <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment> """ cmd = "SCHEDULE_HOSTGROUP_HOST_DOWNTIME" dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes) self._write_command(dt_cmd_str) def schedule_hostgroup_svc_downtime(self, hostgroup, minutes=30): """ This command is used to schedule downtime for all services in a particular hostgroup. During the specified downtime, Nagios will not send notifications out about the services.
Note that scheduling downtime for services does not automatically schedule downtime for the hosts those services are associated with. Syntax: SCHEDULE_HOSTGROUP_SVC_DOWNTIME;<hostgroup_name>;<start_time>; <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment> """ cmd = "SCHEDULE_HOSTGROUP_SVC_DOWNTIME" dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes) self._write_command(dt_cmd_str) def schedule_servicegroup_host_downtime(self, servicegroup, minutes=30): """ This command is used to schedule downtime for all hosts in a particular servicegroup. During the specified downtime, Nagios will not send notifications out about the hosts. Syntax: SCHEDULE_SERVICEGROUP_HOST_DOWNTIME;<servicegroup_name>; <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>; <comment> """ cmd = "SCHEDULE_SERVICEGROUP_HOST_DOWNTIME" dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes) self._write_command(dt_cmd_str) def schedule_servicegroup_svc_downtime(self, servicegroup, minutes=30): """ This command is used to schedule downtime for all services in a particular servicegroup. During the specified downtime, Nagios will not send notifications out about the services. Note that scheduling downtime for services does not automatically schedule downtime for the hosts those services are associated with. Syntax: SCHEDULE_SERVICEGROUP_SVC_DOWNTIME;<servicegroup_name>; <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>; <comment> """ cmd = "SCHEDULE_SERVICEGROUP_SVC_DOWNTIME" dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes) self._write_command(dt_cmd_str) def disable_host_svc_notifications(self, host): """ This command is used to prevent notifications from being sent out for all services on the specified host. Note that this command does not disable notifications from being sent out about the host. Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name> """ cmd = "DISABLE_HOST_SVC_NOTIFICATIONS" notif_str = self._fmt_notif_str(cmd, host) self._write_command(notif_str) def disable_host_notifications(self, host): """ This command is used to prevent notifications from being sent out for the specified host. Note that this command does not disable notifications for services associated with this host. Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name> """ cmd = "DISABLE_HOST_NOTIFICATIONS" notif_str = self._fmt_notif_str(cmd, host) self._write_command(notif_str) def disable_svc_notifications(self, host, services=None): """ This command is used to prevent notifications from being sent out for the specified service. Note that this command does not disable notifications from being sent out about the host. Syntax: DISABLE_SVC_NOTIFICATIONS;<host_name>;<service_description> """ cmd = "DISABLE_SVC_NOTIFICATIONS" if services is None: services = [] for service in services: notif_str = self._fmt_notif_str(cmd, host, svc=service) self._write_command(notif_str) def disable_servicegroup_host_notifications(self, servicegroup): """ This command is used to prevent notifications from being sent out for all hosts in the specified servicegroup. Note that this command does not disable notifications for services associated with hosts in this service group. 
Syntax: DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name> """ cmd = "DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS" notif_str = self._fmt_notif_str(cmd, servicegroup) self._write_command(notif_str) def disable_servicegroup_svc_notifications(self, servicegroup): """ This command is used to prevent notifications from being sent out for all services in the specified servicegroup. Note that this does not prevent notifications from being sent out about the hosts in this servicegroup. Syntax: DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name> """ cmd = "DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS" notif_str = self._fmt_notif_str(cmd, servicegroup) self._write_command(notif_str) def disable_hostgroup_host_notifications(self, hostgroup): """ Disables notifications for all hosts in a particular hostgroup. Note that this does not disable notifications for the services associated with the hosts in the hostgroup - see the DISABLE_HOSTGROUP_SVC_NOTIFICATIONS command for that. Syntax: DISABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name> """ cmd = "DISABLE_HOSTGROUP_HOST_NOTIFICATIONS" notif_str = self._fmt_notif_str(cmd, hostgroup) self._write_command(notif_str) def disable_hostgroup_svc_notifications(self, hostgroup): """ Disables notifications for all services associated with hosts in a particular hostgroup. Note that this does not disable notifications for the hosts in the hostgroup - see the DISABLE_HOSTGROUP_HOST_NOTIFICATIONS command for that. Syntax: DISABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name> """ cmd = "DISABLE_HOSTGROUP_SVC_NOTIFICATIONS" notif_str = self._fmt_notif_str(cmd, hostgroup) self._write_command(notif_str) def enable_host_notifications(self, host): """ Enables notifications for a particular host. Note that this command does not enable notifications for services associated with this host. Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name> """ cmd = "ENABLE_HOST_NOTIFICATIONS" notif_str = self._fmt_notif_str(cmd, host) self._write_command(notif_str) def enable_host_svc_notifications(self, host): """ Enables notifications for all services on the specified host. Note that this does not enable notifications for the host. Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name> """ cmd = "ENABLE_HOST_SVC_NOTIFICATIONS" notif_str = self._fmt_notif_str(cmd, host) nagios_return = self._write_command(notif_str) if nagios_return: return notif_str else: return "Fail: could not write to the command file" def enable_svc_notifications(self, host, services=None): """ Enables notifications for a particular service. Note that this does not enable notifications for the host. Syntax: ENABLE_SVC_NOTIFICATIONS;<host_name>;<service_description> """ cmd = "ENABLE_SVC_NOTIFICATIONS" if services is None: services = [] nagios_return = True return_str_list = [] for service in services: notif_str = self._fmt_notif_str(cmd, host, svc=service) nagios_return = self._write_command(notif_str) and nagios_return return_str_list.append(notif_str) if nagios_return: return return_str_list else: return "Fail: could not write to the command file" def enable_hostgroup_host_notifications(self, hostgroup): """ Enables notifications for all hosts in a particular hostgroup. Note that this command does not enable notifications for services associated with the hosts in this hostgroup. 
Syntax: ENABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name> """ cmd = "ENABLE_HOSTGROUP_HOST_NOTIFICATIONS" notif_str = self._fmt_notif_str(cmd, hostgroup) nagios_return = self._write_command(notif_str) if nagios_return: return notif_str else: return "Fail: could not write to the command file" def enable_hostgroup_svc_notifications(self, hostgroup): """ Enables notifications for all services that are associated with hosts in a particular hostgroup. Note that this does not enable notifications for the hosts in this hostgroup. Syntax: ENABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name> """ cmd = "ENABLE_HOSTGROUP_SVC_NOTIFICATIONS" notif_str = self._fmt_notif_str(cmd, hostgroup) nagios_return = self._write_command(notif_str) if nagios_return: return notif_str else: return "Fail: could not write to the command file" def enable_servicegroup_host_notifications(self, servicegroup): """ Enables notifications for all hosts that have services that are members of a particular servicegroup. Note that this command does not enable notifications for services associated with the hosts in this servicegroup. Syntax: ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name> """ cmd = "ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS" notif_str = self._fmt_notif_str(cmd, servicegroup) nagios_return = self._write_command(notif_str) if nagios_return: return notif_str else: return "Fail: could not write to the command file" def enable_servicegroup_svc_notifications(self, servicegroup): """ Enables notifications for all services that are members of a particular servicegroup. Note that this does not enable notifications for the hosts in this servicegroup. Syntax: ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name> """ cmd = "ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS" notif_str = self._fmt_notif_str(cmd, servicegroup) nagios_return = self._write_command(notif_str) if nagios_return: return notif_str else: return "Fail: could not write to the command file" def silence_host(self, host): """ This command is used to prevent notifications from being sent out for the host and all services on the specified host. This is equivalent to calling disable_host_svc_notifications and disable_host_notifications. Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name> Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name> """ cmd = [ "DISABLE_HOST_SVC_NOTIFICATIONS", "DISABLE_HOST_NOTIFICATIONS" ] nagios_return = True return_str_list = [] for c in cmd: notif_str = self._fmt_notif_str(c, host) nagios_return = self._write_command(notif_str) and nagios_return return_str_list.append(notif_str) if nagios_return: return return_str_list else: return "Fail: could not write to the command file" def unsilence_host(self, host): """ This command is used to enable notifications for the host and all services on the specified host. This is equivalent to calling enable_host_svc_notifications and enable_host_notifications. Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name> Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name> """ cmd = [ "ENABLE_HOST_SVC_NOTIFICATIONS", "ENABLE_HOST_NOTIFICATIONS" ] nagios_return = True return_str_list = [] for c in cmd: notif_str = self._fmt_notif_str(c, host) nagios_return = self._write_command(notif_str) and nagios_return return_str_list.append(notif_str) if nagios_return: return return_str_list else: return "Fail: could not write to the command file" def silence_nagios(self): """ This command is used to disable notifications for all hosts and services in nagios. 
This is a 'SHUT UP, NAGIOS' command """ cmd = 'DISABLE_NOTIFICATIONS' self._write_command(self._fmt_notif_str(cmd)) def unsilence_nagios(self): """ This command is used to enable notifications for all hosts and services in nagios. This is an 'OK, NAGIOS, GO' command """ cmd = 'ENABLE_NOTIFICATIONS' self._write_command(self._fmt_notif_str(cmd)) def nagios_cmd(self, cmd): """ This sends an arbitrary command to nagios. It prepends the submitted time and appends a \n; you just have to provide the properly formatted command """ pre = '[%s]' % int(time.time()) post = '\n' cmdstr = '%s %s %s' % (pre, cmd, post) self._write_command(cmdstr) def act(self): """ Figure out what you want to do from ansible, and then do the needful (at the earliest). """ # host or service downtime? if self.action == 'downtime': if self.services == 'host': self.schedule_host_downtime(self.host, self.minutes) elif self.services == 'all': self.schedule_host_svc_downtime(self.host, self.minutes) else: self.schedule_svc_downtime(self.host, services=self.services, minutes=self.minutes) # toggle the host AND service alerts elif self.action == 'silence': self.silence_host(self.host) elif self.action == 'unsilence': self.unsilence_host(self.host) # toggle host/svc alerts elif self.action == 'enable_alerts': if self.services == 'host': self.enable_host_notifications(self.host) else: self.enable_svc_notifications(self.host, services=self.services) elif self.action == 'disable_alerts': if self.services == 'host': self.disable_host_notifications(self.host) else: self.disable_svc_notifications(self.host, services=self.services) elif self.action == 'silence_nagios': self.silence_nagios() elif self.action == 'unsilence_nagios': self.unsilence_nagios() elif self.action == 'command': self.nagios_cmd(self.command) # unknown action else: self.module.fail_json(msg="unknown action specified: '%s'" % \ self.action) self.module.exit_json(nagios_commands=self.command_results, changed=True) ###################################################################### # import module snippets from ansible.module_utils.basic import * main()
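# --- Illustrative sketch (not part of the module) -----------------------------
# Shows the external-command string _fmt_dt_str() builds for a 30-minute
# service downtime; the host, service and timestamp are made-up examples.
def _demo_downtime_string():
    entry_time = 1400000000                  # example epoch timestamp
    duration_s = 30 * 60
    args = ['httpd', str(entry_time), str(entry_time + duration_s), '1', '0',
            str(duration_s), 'Ansible', 'Scheduling downtime']
    # -> '[1400000000] SCHEDULE_SVC_DOWNTIME;web01;httpd;1400000000;1400001800;1;0;1800;Ansible;Scheduling downtime\n'
    return '[%s] SCHEDULE_SVC_DOWNTIME;web01;%s\n' % (entry_time, ';'.join(args))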
gpl-3.0
mandeepdhami/nova
nova/tests/unit/network/test_neutronv2.py
9
172164
# Copyright 2012 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import collections import contextlib import copy import uuid import mock from mox3 import mox from neutronclient.common import exceptions from neutronclient.v2_0 import client from oslo_config import cfg from oslo_serialization import jsonutils from oslo_utils import timeutils import six from six.moves import range from nova.compute import flavors from nova import context from nova import exception from nova.network import model from nova.network.neutronv2 import api as neutronapi from nova.network.neutronv2 import constants from nova import objects from nova.openstack.common import policy as common_policy from nova.pci import manager as pci_manager from nova.pci import whitelist as pci_whitelist from nova import policy from nova import test from nova.tests.unit import fake_instance CONF = cfg.CONF # NOTE: Neutron client raises Exception which is discouraged by HACKING. # We set this variable here and use it for assertions below to avoid # the hacking checks until we can make neutron client throw a custom # exception class instead. NEUTRON_CLIENT_EXCEPTION = Exception fake_info_cache = { 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': False, 'instance_uuid': 'fake-uuid', 'network_info': '[]', } class MyComparator(mox.Comparator): def __init__(self, lhs): self.lhs = lhs def _com_dict(self, lhs, rhs): if len(lhs) != len(rhs): return False for key, value in six.iteritems(lhs): if key not in rhs: return False rhs_value = rhs[key] if not self._com(value, rhs_value): return False return True def _com_list(self, lhs, rhs): if len(lhs) != len(rhs): return False for lhs_value in lhs: if lhs_value not in rhs: return False return True def _com(self, lhs, rhs): if lhs is None: return rhs is None if isinstance(lhs, dict): if not isinstance(rhs, dict): return False return self._com_dict(lhs, rhs) if isinstance(lhs, list): if not isinstance(rhs, list): return False return self._com_list(lhs, rhs) if isinstance(lhs, tuple): if not isinstance(rhs, tuple): return False return self._com_list(lhs, rhs) return lhs == rhs def equals(self, rhs): return self._com(self.lhs, rhs) def __repr__(self): return str(self.lhs) class TestNeutronClient(test.NoDBTestCase): def setUp(self): super(TestNeutronClient, self).setUp() neutronapi.reset_state() def test_withtoken(self): self.flags(url='http://anyhost/', group='neutron') self.flags(timeout=30, group='neutron') my_context = context.RequestContext('userid', 'my_tenantid', auth_token='token') cl = neutronapi.get_client(my_context) self.assertEqual(CONF.neutron.url, cl.httpclient.endpoint_override) self.assertEqual(my_context.auth_token, cl.httpclient.auth.auth_token) self.assertEqual(CONF.neutron.timeout, cl.httpclient.session.timeout) def test_withouttoken(self): my_context = context.RequestContext('userid', 'my_tenantid') self.assertRaises(exceptions.Unauthorized, neutronapi.get_client, my_context) def test_withtoken_context_is_admin(self): 
self.flags(url='http://anyhost/', group='neutron') self.flags(timeout=30, group='neutron') my_context = context.RequestContext('userid', 'my_tenantid', auth_token='token', is_admin=True) cl = neutronapi.get_client(my_context) self.assertEqual(CONF.neutron.url, cl.httpclient.endpoint_override) self.assertEqual(my_context.auth_token, cl.httpclient.auth.auth_token) self.assertEqual(CONF.neutron.timeout, cl.httpclient.session.timeout) def test_withouttoken_keystone_connection_error(self): self.flags(auth_strategy='keystone', group='neutron') self.flags(url='http://anyhost/', group='neutron') my_context = context.RequestContext('userid', 'my_tenantid') self.assertRaises(NEUTRON_CLIENT_EXCEPTION, neutronapi.get_client, my_context) @mock.patch('nova.network.neutronv2.api._ADMIN_AUTH') @mock.patch.object(client.Client, "list_networks", new=mock.Mock()) def test_reuse_admin_token(self, m): self.flags(url='http://anyhost/', group='neutron') my_context = context.RequestContext('userid', 'my_tenantid', auth_token='token') tokens = ['new_token2', 'new_token1'] def token_vals(*args, **kwargs): return tokens.pop() m.get_token.side_effect = token_vals client1 = neutronapi.get_client(my_context, True) client1.list_networks(retrieve_all=False) self.assertEqual('new_token1', client1.httpclient.auth.get_token(None)) client1 = neutronapi.get_client(my_context, True) client1.list_networks(retrieve_all=False) self.assertEqual('new_token2', client1.httpclient.auth.get_token(None)) class TestNeutronv2Base(test.TestCase): def setUp(self): super(TestNeutronv2Base, self).setUp() self.context = context.RequestContext('userid', 'my_tenantid') setattr(self.context, 'auth_token', 'bff4a5a6b9eb4ea2a6efec6eefb77936') self.tenant_id = '9d049e4b60b64716978ab415e6fbd5c0' self.instance = {'project_id': self.tenant_id, 'uuid': str(uuid.uuid4()), 'display_name': 'test_instance', 'availability_zone': 'nova', 'host': 'some_host', 'info_cache': {'network_info': []}, 'security_groups': []} self.instance2 = {'project_id': self.tenant_id, 'uuid': str(uuid.uuid4()), 'display_name': 'test_instance2', 'availability_zone': 'nova', 'info_cache': {'network_info': []}, 'security_groups': []} self.nets1 = [{'id': 'my_netid1', 'name': 'my_netname1', 'subnets': ['mysubnid1'], 'tenant_id': 'my_tenantid'}] self.nets2 = [] self.nets2.append(self.nets1[0]) self.nets2.append({'id': 'my_netid2', 'name': 'my_netname2', 'subnets': ['mysubnid2'], 'tenant_id': 'my_tenantid'}) self.nets3 = self.nets2 + [{'id': 'my_netid3', 'name': 'my_netname3', 'tenant_id': 'my_tenantid'}] self.nets4 = [{'id': 'his_netid4', 'name': 'his_netname4', 'tenant_id': 'his_tenantid'}] # A network request with external networks self.nets5 = self.nets1 + [{'id': 'the-external-one', 'name': 'out-of-this-world', 'router:external': True, 'tenant_id': 'should-be-an-admin'}] # A network request with a duplicate self.nets6 = [] self.nets6.append(self.nets1[0]) self.nets6.append(self.nets1[0]) # A network request with a combo self.nets7 = [] self.nets7.append(self.nets2[1]) self.nets7.append(self.nets1[0]) self.nets7.append(self.nets2[1]) self.nets7.append(self.nets1[0]) # A network request with only external network self.nets8 = [self.nets5[1]] # An empty network self.nets9 = [] # A network that is both shared and external self.nets10 = [{'id': 'net_id', 'name': 'net_name', 'router:external': True, 'shared': True}] self.nets = [self.nets1, self.nets2, self.nets3, self.nets4, self.nets5, self.nets6, self.nets7, self.nets8, self.nets9, self.nets10] self.port_address = '10.0.1.2' 
self.port_data1 = [{'network_id': 'my_netid1', 'device_id': self.instance2['uuid'], 'tenant_id': self.tenant_id, 'device_owner': 'compute:nova', 'id': 'my_portid1', 'binding:vnic_type': model.VNIC_TYPE_NORMAL, 'status': 'DOWN', 'admin_state_up': True, 'fixed_ips': [{'ip_address': self.port_address, 'subnet_id': 'my_subid1'}], 'mac_address': 'my_mac1', }] self.float_data1 = [{'port_id': 'my_portid1', 'fixed_ip_address': self.port_address, 'floating_ip_address': '172.0.1.2'}] self.dhcp_port_data1 = [{'fixed_ips': [{'ip_address': '10.0.1.9', 'subnet_id': 'my_subid1'}], 'status': 'ACTIVE', 'admin_state_up': True}] self.port_address2 = '10.0.2.2' self.port_data2 = [] self.port_data2.append(self.port_data1[0]) self.port_data2.append({'network_id': 'my_netid2', 'device_id': self.instance['uuid'], 'tenant_id': self.tenant_id, 'admin_state_up': True, 'status': 'ACTIVE', 'device_owner': 'compute:nova', 'id': 'my_portid2', 'binding:vnic_type': model.VNIC_TYPE_NORMAL, 'fixed_ips': [{'ip_address': self.port_address2, 'subnet_id': 'my_subid2'}], 'mac_address': 'my_mac2', }) self.float_data2 = [] self.float_data2.append(self.float_data1[0]) self.float_data2.append({'port_id': 'my_portid2', 'fixed_ip_address': '10.0.2.2', 'floating_ip_address': '172.0.2.2'}) self.port_data3 = [{'network_id': 'my_netid1', 'device_id': 'device_id3', 'tenant_id': self.tenant_id, 'status': 'DOWN', 'admin_state_up': True, 'device_owner': 'compute:nova', 'id': 'my_portid3', 'binding:vnic_type': model.VNIC_TYPE_NORMAL, 'fixed_ips': [], # no fixed ip 'mac_address': 'my_mac3', }] self.subnet_data1 = [{'id': 'my_subid1', 'cidr': '10.0.1.0/24', 'network_id': 'my_netid1', 'gateway_ip': '10.0.1.1', 'dns_nameservers': ['8.8.1.1', '8.8.1.2']}] self.subnet_data2 = [] self.subnet_data_n = [{'id': 'my_subid1', 'cidr': '10.0.1.0/24', 'network_id': 'my_netid1', 'gateway_ip': '10.0.1.1', 'dns_nameservers': ['8.8.1.1', '8.8.1.2']}, {'id': 'my_subid2', 'cidr': '20.0.1.0/24', 'network_id': 'my_netid2', 'gateway_ip': '20.0.1.1', 'dns_nameservers': ['8.8.1.1', '8.8.1.2']}] self.subnet_data2.append({'id': 'my_subid2', 'cidr': '10.0.2.0/24', 'network_id': 'my_netid2', 'gateway_ip': '10.0.2.1', 'dns_nameservers': ['8.8.2.1', '8.8.2.2']}) self.fip_pool = {'id': '4fdbfd74-eaf8-4884-90d9-00bd6f10c2d3', 'name': 'ext_net', 'router:external': True, 'tenant_id': 'admin_tenantid'} self.fip_pool_nova = {'id': '435e20c3-d9f1-4f1b-bee5-4611a1dd07db', 'name': 'nova', 'router:external': True, 'tenant_id': 'admin_tenantid'} self.fip_unassociated = {'tenant_id': 'my_tenantid', 'id': 'fip_id1', 'floating_ip_address': '172.24.4.227', 'floating_network_id': self.fip_pool['id'], 'port_id': None, 'fixed_ip_address': None, 'router_id': None} fixed_ip_address = self.port_data2[1]['fixed_ips'][0]['ip_address'] self.fip_associated = {'tenant_id': 'my_tenantid', 'id': 'fip_id2', 'floating_ip_address': '172.24.4.228', 'floating_network_id': self.fip_pool['id'], 'port_id': self.port_data2[1]['id'], 'fixed_ip_address': fixed_ip_address, 'router_id': 'router_id1'} self._returned_nw_info = [] self.mox.StubOutWithMock(neutronapi, 'get_client') self.moxed_client = self.mox.CreateMock(client.Client) self.addCleanup(CONF.reset) self.addCleanup(self.mox.VerifyAll) self.addCleanup(self.mox.UnsetStubs) self.addCleanup(self.stubs.UnsetAll) def _fake_instance_object(self, instance): return fake_instance.fake_instance_obj(self.context, **instance) def _fake_instance_info_cache(self, nw_info): info_cache = {} info_cache['instance_uuid'] = str(uuid.uuid4()) info_cache['deleted'] = False 
info_cache['created_at'] = timeutils.utcnow() info_cache['deleted_at'] = timeutils.utcnow() info_cache['updated_at'] = timeutils.utcnow() info_cache['network_info'] = model.NetworkInfo.hydrate(six.text_type( jsonutils.dumps(nw_info))) return info_cache def _fake_instance_object_with_info_cache(self, instance): expected_attrs = ['info_cache'] instance = objects.Instance._from_db_object(self.context, objects.Instance(), fake_instance.fake_db_instance(**instance), expected_attrs=expected_attrs) return instance def _stub_allocate_for_instance(self, net_idx=1, **kwargs): self.instance = self._fake_instance_object(self.instance) self.instance2 = self._fake_instance_object(self.instance2) api = neutronapi.API() self.mox.StubOutWithMock(api, 'get_instance_nw_info') has_portbinding = False has_extra_dhcp_opts = False dhcp_options = kwargs.get('dhcp_options') if dhcp_options is not None: has_extra_dhcp_opts = True if kwargs.get('portbinding'): has_portbinding = True api.extensions[constants.PORTBINDING_EXT] = 1 self.mox.StubOutWithMock(api, '_refresh_neutron_extensions_cache') neutronapi.get_client(mox.IgnoreArg()).AndReturn( self.moxed_client) neutronapi.get_client( mox.IgnoreArg(), admin=True).AndReturn( self.moxed_client) api._refresh_neutron_extensions_cache(mox.IgnoreArg(), neutron=self.moxed_client) self.mox.StubOutWithMock(api, '_has_port_binding_extension') api._has_port_binding_extension(mox.IgnoreArg(), neutron=self.moxed_client, refresh_cache=True).AndReturn(has_portbinding) else: self.mox.StubOutWithMock(api, '_refresh_neutron_extensions_cache') api._refresh_neutron_extensions_cache(mox.IgnoreArg(), neutron=self.moxed_client) self.mox.StubOutWithMock(api, '_populate_neutron_extension_values') # Net idx is 1-based for compatibility with existing unit tests nets = self.nets[net_idx - 1] ports = {} fixed_ips = {} macs = kwargs.get('macs') if macs: macs = set(macs) req_net_ids = [] ordered_networks = [] if 'requested_networks' in kwargs: for request in kwargs['requested_networks']: if request.port_id: if request.port_id == 'my_portid3': self.moxed_client.show_port(request.port_id ).AndReturn( {'port': {'id': 'my_portid3', 'network_id': 'my_netid1', 'tenant_id': self.tenant_id, 'mac_address': 'my_mac1', 'device_id': kwargs.get('_device') and self.instance2.uuid or ''}}) ports['my_netid1'] = [self.port_data1[0], self.port_data3[0]] ports[request.port_id] = self.port_data3[0] request.network_id = 'my_netid1' if macs is not None: macs.discard('my_mac1') elif request.port_id == 'invalid_id': PortNotFound = exceptions.PortNotFoundClient( status_code=404) self.moxed_client.show_port(request.port_id ).AndRaise(PortNotFound) else: self.moxed_client.show_port(request.port_id).AndReturn( {'port': {'id': 'my_portid1', 'network_id': 'my_netid1', 'tenant_id': self.tenant_id, 'mac_address': 'my_mac1', 'device_id': kwargs.get('_device') and self.instance2.uuid or ''}}) ports[request.port_id] = self.port_data1[0] request.network_id = 'my_netid1' if macs is not None: macs.discard('my_mac1') else: fixed_ips[request.network_id] = request.address req_net_ids.append(request.network_id) ordered_networks.append(request) else: for n in nets: ordered_networks.append( objects.NetworkRequest(network_id=n['id'])) if kwargs.get('_break') == 'pre_list_networks': self.mox.ReplayAll() return api # search all req_net_ids as in api.py search_ids = req_net_ids if search_ids: mox_list_params = {'id': mox.SameElementsAs(search_ids)} self.moxed_client.list_networks( **mox_list_params).AndReturn({'networks': nets}) else: 
mox_list_params = {'tenant_id': self.instance.project_id, 'shared': False} self.moxed_client.list_networks( **mox_list_params).AndReturn({'networks': nets}) mox_list_params = {'shared': True} self.moxed_client.list_networks( **mox_list_params).AndReturn({'networks': []}) if kwargs.get('_break') == 'post_list_networks': self.mox.ReplayAll() return api if (('requested_networks' not in kwargs or kwargs['requested_networks'].as_tuples() == [(None, None, None)]) and len(nets) > 1): self.mox.ReplayAll() return api preexisting_port_ids = [] ports_in_requested_net_order = [] nets_in_requested_net_order = [] for request in ordered_networks: port_req_body = { 'port': { 'device_id': self.instance.uuid, 'device_owner': 'compute:nova', }, } # Network lookup for available network_id network = None for net in nets: if net['id'] == request.network_id: network = net break # if net_id did not pass validate_networks() and not available # here then skip it safely not continuing with a None Network else: continue if has_portbinding: port_req_body['port']['binding:host_id'] = ( self.instance.get('host')) if not has_portbinding: api._populate_neutron_extension_values(mox.IgnoreArg(), self.instance, mox.IgnoreArg(), mox.IgnoreArg(), neutron=self.moxed_client).AndReturn(None) else: # since _populate_neutron_extension_values() will call # _has_port_binding_extension() api._has_port_binding_extension(mox.IgnoreArg(), neutron=self.moxed_client).\ AndReturn(has_portbinding) if request.port_id: port = ports[request.port_id] self.moxed_client.update_port(request.port_id, MyComparator(port_req_body) ).AndReturn( {'port': port}) ports_in_requested_net_order.append(request.port_id) preexisting_port_ids.append(request.port_id) else: request.address = fixed_ips.get(request.network_id) if request.address: port_req_body['port']['fixed_ips'] = [ {'ip_address': str(request.address)}] port_req_body['port']['network_id'] = request.network_id port_req_body['port']['admin_state_up'] = True port_req_body['port']['tenant_id'] = \ self.instance.project_id if macs: port_req_body['port']['mac_address'] = macs.pop() if has_portbinding: port_req_body['port']['binding:host_id'] = ( self.instance.get('host')) res_port = {'port': {'id': 'fake'}} if has_extra_dhcp_opts: port_req_body['port']['extra_dhcp_opts'] = dhcp_options if kwargs.get('_break') == 'mac' + request.network_id: self.mox.ReplayAll() return api self.moxed_client.create_port( MyComparator(port_req_body)).AndReturn(res_port) ports_in_requested_net_order.append(res_port['port']['id']) nets_in_requested_net_order.append(network) api.get_instance_nw_info(mox.IgnoreArg(), self.instance, networks=nets_in_requested_net_order, port_ids=ports_in_requested_net_order, admin_client=None, preexisting_port_ids=preexisting_port_ids ).AndReturn(self._returned_nw_info) self.mox.ReplayAll() return api def _verify_nw_info(self, nw_inf, index=0): id_suffix = index + 1 self.assertEqual('10.0.%s.2' % id_suffix, nw_inf.fixed_ips()[index]['address']) self.assertEqual('172.0.%s.2' % id_suffix, nw_inf.fixed_ips()[index].floating_ip_addresses()[0]) self.assertEqual('my_netname%s' % id_suffix, nw_inf[index]['network']['label']) self.assertEqual('my_portid%s' % id_suffix, nw_inf[index]['id']) self.assertEqual('my_mac%s' % id_suffix, nw_inf[index]['address']) self.assertEqual('10.0.%s.0/24' % id_suffix, nw_inf[index]['network']['subnets'][0]['cidr']) ip_addr = model.IP(address='8.8.%s.1' % id_suffix, version=4, type='dns') self.assertIn(ip_addr, nw_inf[index]['network']['subnets'][0]['dns']) def 
_get_instance_nw_info(self, number): api = neutronapi.API() self.mox.StubOutWithMock(api.db, 'instance_info_cache_update') api.db.instance_info_cache_update(mox.IgnoreArg(), self.instance['uuid'], mox.IgnoreArg()).AndReturn( fake_info_cache) port_data = number == 1 and self.port_data1 or self.port_data2 net_info_cache = [] for port in port_data: net_info_cache.append({"network": {"id": port['network_id']}, "id": port['id']}) self.moxed_client.list_ports( tenant_id=self.instance['project_id'], device_id=self.instance['uuid']).AndReturn( {'ports': port_data}) net_ids = [port['network_id'] for port in port_data] nets = number == 1 and self.nets1 or self.nets2 self.moxed_client.list_networks( id=net_ids).AndReturn({'networks': nets}) for i in range(1, number + 1): float_data = number == 1 and self.float_data1 or self.float_data2 for ip in port_data[i - 1]['fixed_ips']: float_data = [x for x in float_data if x['fixed_ip_address'] == ip['ip_address']] self.moxed_client.list_floatingips( fixed_ip_address=ip['ip_address'], port_id=port_data[i - 1]['id']).AndReturn( {'floatingips': float_data}) subnet_data = i == 1 and self.subnet_data1 or self.subnet_data2 self.moxed_client.list_subnets( id=mox.SameElementsAs(['my_subid%s' % i])).AndReturn( {'subnets': subnet_data}) self.moxed_client.list_ports( network_id=subnet_data[0]['network_id'], device_owner='network:dhcp').AndReturn( {'ports': []}) self.mox.ReplayAll() self.instance['info_cache'] = self._fake_instance_info_cache( net_info_cache) instance = self._fake_instance_object_with_info_cache(self.instance) nw_inf = api.get_instance_nw_info(self.context, instance) for i in range(0, number): self._verify_nw_info(nw_inf, i) def _allocate_for_instance(self, net_idx=1, **kwargs): api = self._stub_allocate_for_instance(net_idx, **kwargs) return api.allocate_for_instance(self.context, self.instance, **kwargs) class TestNeutronv2(TestNeutronv2Base): def setUp(self): super(TestNeutronv2, self).setUp() neutronapi.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn( self.moxed_client) def test_get_instance_nw_info_1(self): # Test to get one port in one network and subnet. neutronapi.get_client(mox.IgnoreArg(), admin=True).MultipleTimes().AndReturn( self.moxed_client) self._get_instance_nw_info(1) def test_get_instance_nw_info_2(self): # Test to get one port in each of two networks and subnets. neutronapi.get_client(mox.IgnoreArg(), admin=True).MultipleTimes().AndReturn( self.moxed_client) self._get_instance_nw_info(2) def test_get_instance_nw_info_with_nets_add_interface(self): # This tests that adding an interface to an instance does not # remove the first interface from the instance's network info. network_model = model.Network(id='network_id', bridge='br-int', injected='injected', label='fake_network', tenant_id='fake_tenant') network_cache = {'info_cache': { 'network_info': [{'id': self.port_data2[0]['id'], 'address': 'mac_address', 'network': network_model, 'type': 'ovs', 'ovs_interfaceid': 'ovs_interfaceid', 'devname': 'devname'}]}} self._fake_get_instance_nw_info_helper(network_cache, self.port_data2, self.nets2, [self.port_data2[1]['id']]) def test_get_instance_nw_info_remove_ports_from_neutron(self): # This tests that when a port is removed in neutron it # is also removed from nova's network cache.
network_model = model.Network(id=self.port_data2[0]['network_id'], bridge='br-int', injected='injected', label='fake_network', tenant_id='fake_tenant') network_cache = {'info_cache': { 'network_info': [{'id': 'network_id', 'address': 'mac_address', 'network': network_model, 'type': 'ovs', 'ovs_interfaceid': 'ovs_interfaceid', 'devname': 'devname'}]}} self._fake_get_instance_nw_info_helper(network_cache, self.port_data2, None, None) def test_get_instance_nw_info_ignores_neutron_ports(self): # Tests that only ports in the network_cache are updated # and ports returned from neutron that match the same # instance_id/device_id are ignored. port_data2 = copy.copy(self.port_data2) # set device_id on the ports to be the same. port_data2[1]['device_id'] = port_data2[0]['device_id'] network_model = model.Network(id='network_id', bridge='br-int', injected='injected', label='fake_network', tenant_id='fake_tenant') network_cache = {'info_cache': { 'network_info': [{'id': 'network_id', 'address': 'mac_address', 'network': network_model, 'type': 'ovs', 'ovs_interfaceid': 'ovs_interfaceid', 'devname': 'devname'}]}} self._fake_get_instance_nw_info_helper(network_cache, port_data2, None, None) def _fake_get_instance_nw_info_helper(self, network_cache, current_neutron_ports, networks=None, port_ids=None): """Helper function to test get_instance_nw_info. :param network_cache - data already in the nova network cache. :param current_neutron_ports - updated list of ports from neutron. :param networks - networks of ports being added to instance. :param port_ids - new ports being added to instance. """ # keep a copy of the original ports/networks to pass to # get_instance_nw_info() as the code below changes them. original_port_ids = copy.copy(port_ids) original_networks = copy.copy(networks) api = neutronapi.API() self.mox.StubOutWithMock(api.db, 'instance_info_cache_update') api.db.instance_info_cache_update( mox.IgnoreArg(), self.instance['uuid'], mox.IgnoreArg()).AndReturn(fake_info_cache) neutronapi.get_client(mox.IgnoreArg(), admin=True).MultipleTimes().AndReturn( self.moxed_client) self.moxed_client.list_ports( tenant_id=self.instance['project_id'], device_id=self.instance['uuid']).AndReturn( {'ports': current_neutron_ports}) ifaces = network_cache['info_cache']['network_info'] if port_ids is None: port_ids = [iface['id'] for iface in ifaces] net_ids = [iface['network']['id'] for iface in ifaces] nets = [{'id': iface['network']['id'], 'name': iface['network']['label'], 'tenant_id': iface['network']['meta']['tenant_id']} for iface in ifaces] if networks is None: self.moxed_client.list_networks( id=net_ids).AndReturn({'networks': nets}) else: networks = networks + [ dict(id=iface['network']['id'], name=iface['network']['label'], tenant_id=iface['network']['meta']['tenant_id']) for iface in ifaces] port_ids = [iface['id'] for iface in ifaces] + port_ids index = 0 current_neutron_port_map = {} for current_neutron_port in current_neutron_ports: current_neutron_port_map[current_neutron_port['id']] = ( current_neutron_port) for port_id in port_ids: current_neutron_port = current_neutron_port_map.get(port_id) if current_neutron_port: for ip in current_neutron_port['fixed_ips']: self.moxed_client.list_floatingips( fixed_ip_address=ip['ip_address'], port_id=current_neutron_port['id']).AndReturn( {'floatingips': [self.float_data2[index]]}) self.moxed_client.list_subnets( id=mox.SameElementsAs([ip['subnet_id']]) ).AndReturn( {'subnets': [self.subnet_data_n[index]]}) self.moxed_client.list_ports( 
                        network_id=current_neutron_port['network_id'],
                        device_owner='network:dhcp').AndReturn(
                            {'ports': self.dhcp_port_data1})
                index += 1
        self.mox.ReplayAll()

        self.instance['info_cache'] = self._fake_instance_info_cache(
            network_cache['info_cache']['network_info'])
        instance = self._fake_instance_object_with_info_cache(self.instance)

        nw_infs = api.get_instance_nw_info(self.context,
                                           instance,
                                           networks=original_networks,
                                           port_ids=original_port_ids)

        self.assertEqual(index, len(nw_infs))
        # ensure that nic ordering is preserved
        for iface_index in range(index):
            self.assertEqual(nw_infs[iface_index]['id'],
                             port_ids[iface_index])

    def test_get_instance_nw_info_without_subnet(self):
        # Test get instance_nw_info for a port without subnet.
        api = neutronapi.API()
        self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
        api.db.instance_info_cache_update(
            mox.IgnoreArg(),
            self.instance['uuid'], mox.IgnoreArg()).AndReturn(fake_info_cache)
        self.moxed_client.list_ports(
            tenant_id=self.instance['project_id'],
            device_id=self.instance['uuid']).AndReturn(
                {'ports': self.port_data3})
        self.moxed_client.list_networks(
            id=[self.port_data1[0]['network_id']]).AndReturn(
                {'networks': self.nets1})
        neutronapi.get_client(mox.IgnoreArg(),
                              admin=True).MultipleTimes().AndReturn(
            self.moxed_client)

        net_info_cache = []
        for port in self.port_data3:
            net_info_cache.append({"network": {"id": port['network_id']},
                                   "id": port['id']})
        self.instance['info_cache'] = self._fake_instance_info_cache(
            net_info_cache)

        self.mox.ReplayAll()

        instance = self._fake_instance_object_with_info_cache(self.instance)
        nw_inf = api.get_instance_nw_info(self.context, instance)

        id_suffix = 3
        self.assertEqual(0, len(nw_inf.fixed_ips()))
        self.assertEqual('my_netname1', nw_inf[0]['network']['label'])
        self.assertEqual('my_portid%s' % id_suffix, nw_inf[0]['id'])
        self.assertEqual('my_mac%s' % id_suffix, nw_inf[0]['address'])
        self.assertEqual(0, len(nw_inf[0]['network']['subnets']))

    def test_refresh_neutron_extensions_cache(self):
        api = neutronapi.API()

        # Note: Don't want the default get_client from setUp()
        self.mox.ResetAll()
        neutronapi.get_client(mox.IgnoreArg()).AndReturn(
            self.moxed_client)
        self.moxed_client.list_extensions().AndReturn(
            {'extensions': [{'name': constants.QOS_QUEUE}]})
        self.mox.ReplayAll()
        api._refresh_neutron_extensions_cache(mox.IgnoreArg())
        self.assertEqual(
            {constants.QOS_QUEUE: {'name': constants.QOS_QUEUE}},
            api.extensions)

    def test_populate_neutron_extension_values_rxtx_factor(self):
        api = neutronapi.API()

        # Note: Don't want the default get_client from setUp()
        self.mox.ResetAll()
        neutronapi.get_client(mox.IgnoreArg()).AndReturn(
            self.moxed_client)
        self.moxed_client.list_extensions().AndReturn(
            {'extensions': [{'name': constants.QOS_QUEUE}]})
        self.mox.ReplayAll()
        flavor = flavors.get_default_flavor()
        flavor['rxtx_factor'] = 1
        instance = objects.Instance(system_metadata={})
        with mock.patch.object(instance, 'save'):
            instance.set_flavor(flavor)
        port_req_body = {'port': {}}
        api._populate_neutron_extension_values(self.context, instance,
                                               None, port_req_body)
        self.assertEqual(port_req_body['port']['rxtx_factor'], 1)

    def test_allocate_for_instance_1(self):
        # Allocate one port in one network env.
        self._allocate_for_instance(1)

    def test_allocate_for_instance_2(self):
        # Allocate one port in two networks env.
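        # With more than one network available and no requested_networks,
        # nova cannot pick a network on the caller's behalf, so the
        # allocation below is expected to raise NetworkAmbiguous instead of
        # creating a port.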
        api = self._stub_allocate_for_instance(net_idx=2)
        self.assertRaises(exception.NetworkAmbiguous,
                          api.allocate_for_instance,
                          self.context, self.instance)

    def test_allocate_for_instance_accepts_macs_kwargs_None(self):
        # The macs kwarg should be accepted as None.
        self._allocate_for_instance(1, macs=None)

    def test_allocate_for_instance_accepts_macs_kwargs_set(self):
        # The macs kwarg should be accepted, as a set, the
        # _allocate_for_instance helper checks that the mac is used to create
        # a port.
        self._allocate_for_instance(1, macs=set(['ab:cd:ef:01:23:45']))

    def test_allocate_for_instance_accepts_only_portid(self):
        # Make sure allocate_for_instance works when only a portid is
        # provided.
        self._returned_nw_info = self.port_data1
        result = self._allocate_for_instance(
            requested_networks=objects.NetworkRequestList(
                objects=[objects.NetworkRequest(port_id='my_portid1')]))
        self.assertEqual(self.port_data1, result)

    @mock.patch('nova.network.neutronv2.api.API._unbind_ports')
    def test_allocate_for_instance_not_enough_macs_via_ports(self,
                                                             mock_unbind):
        # using a hypervisor MAC via a pre-created port will stop it being
        # used to dynamically create a port on a network. We put the network
        # first in requested_networks so that if the code were to not
        # pre-check requested ports, it would incorrectly assign the mac and
        # not fail.
        requested_networks = objects.NetworkRequestList(
            objects=[
                objects.NetworkRequest(network_id=self.nets2[1]['id']),
                objects.NetworkRequest(port_id='my_portid1')])
        api = self._stub_allocate_for_instance(
            net_idx=2, requested_networks=requested_networks,
            macs=set(['my_mac1']),
            _break='mac' + self.nets2[1]['id'])
        self.assertRaises(exception.PortNotFree,
                          api.allocate_for_instance, self.context,
                          self.instance,
                          requested_networks=requested_networks,
                          macs=set(['my_mac1']))
        mock_unbind.assert_called_once_with(self.context, [],
                                            self.moxed_client, mock.ANY)

    @mock.patch('nova.network.neutronv2.api.API._unbind_ports')
    def test_allocate_for_instance_not_enough_macs(self, mock_unbind):
        # If not enough MAC addresses are available to allocate to networks,
        # an error should be raised.
        # We could pass in macs=set(), but that wouldn't tell us that
        # allocate_for_instance tracks used macs properly, so we pass in one
        # mac, and ask for two networks.
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(network_id=self.nets2[1]['id']),
                     objects.NetworkRequest(network_id=self.nets2[0]['id'])])
        api = self._stub_allocate_for_instance(
            net_idx=2, requested_networks=requested_networks,
            macs=set(['my_mac2']),
            _break='mac' + self.nets2[0]['id'])
        with mock.patch.object(api, '_delete_ports'):
            self.assertRaises(exception.PortNotFree,
                              api.allocate_for_instance, self.context,
                              self.instance,
                              requested_networks=requested_networks,
                              macs=set(['my_mac2']))
        mock_unbind.assert_called_once_with(self.context, [],
                                            self.moxed_client, mock.ANY)

    def test_allocate_for_instance_two_macs_two_networks(self):
        # If two MACs are available and two networks requested, two new ports
        # get made and no exceptions raised.
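        # In outline, the MAC bookkeeping that the tests above exercise
        # looks roughly like the following (a sketch, not the real
        # implementation):
        #
        #     available_macs = set(requested_macs)
        #     for port in requested_ports:
        #         if port['mac_address'] not in available_macs:
        #             raise PortNotUsable()   # unknown hypervisor MAC
        #         available_macs.discard(port['mac_address'])
        #     for net in requested_networks:
        #         if not available_macs:
        #             raise PortNotFree()     # ran out of MACs
        #         create_port(net, mac=available_macs.pop())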
requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id=self.nets2[1]['id']), objects.NetworkRequest(network_id=self.nets2[0]['id'])]) self._allocate_for_instance( net_idx=2, requested_networks=requested_networks, macs=set(['my_mac2', 'my_mac1'])) def test_allocate_for_instance_mac_conflicting_requested_port(self): # specify only first and last network requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id='my_portid1')]) api = self._stub_allocate_for_instance( net_idx=1, requested_networks=requested_networks, macs=set(['unknown:mac']), _break='pre_list_networks') self.assertRaises(exception.PortNotUsable, api.allocate_for_instance, self.context, self.instance, requested_networks=requested_networks, macs=set(['unknown:mac'])) def test_allocate_for_instance_without_requested_networks(self): api = self._stub_allocate_for_instance(net_idx=3) self.assertRaises(exception.NetworkAmbiguous, api.allocate_for_instance, self.context, self.instance) def test_allocate_for_instance_with_requested_non_available_network(self): """verify that a non available network is ignored. self.nets2 (net_idx=2) is composed of self.nets3[0] and self.nets3[1] Do not create a port on a non available network self.nets3[2]. """ requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id=net['id']) for net in (self.nets3[0], self.nets3[2], self.nets3[1])]) self._allocate_for_instance(net_idx=2, requested_networks=requested_networks) def test_allocate_for_instance_with_requested_networks(self): # specify only first and last network requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id=net['id']) for net in (self.nets3[1], self.nets3[0], self.nets3[2])]) self._allocate_for_instance(net_idx=3, requested_networks=requested_networks) def test_allocate_for_instance_with_invalid_network_id(self): requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id='invalid_id')]) api = self._stub_allocate_for_instance(net_idx=9, requested_networks=requested_networks, _break='post_list_networks') self.assertRaises(exception.NetworkNotFound, api.allocate_for_instance, self.context, self.instance, requested_networks=requested_networks) def test_allocate_for_instance_with_requested_networks_with_fixedip(self): # specify only first and last network requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id=self.nets1[0]['id'], address='10.0.1.0')]) self._allocate_for_instance(net_idx=1, requested_networks=requested_networks) def test_allocate_for_instance_with_requested_networks_with_port(self): requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id='my_portid1')]) self._allocate_for_instance(net_idx=1, requested_networks=requested_networks) def test_allocate_for_instance_no_networks(self): """verify the exception thrown when there are no networks defined.""" self.instance = fake_instance.fake_instance_obj(self.context, **self.instance) api = neutronapi.API() self.moxed_client.list_extensions().AndReturn({'extensions': []}) self.moxed_client.list_networks( tenant_id=self.instance.project_id, shared=False).AndReturn( {'networks': model.NetworkInfo([])}) self.moxed_client.list_networks(shared=True).AndReturn( {'networks': model.NetworkInfo([])}) self.mox.ReplayAll() nwinfo = api.allocate_for_instance(self.context, self.instance) self.assertEqual(len(nwinfo), 0) 
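    # NOTE: the expectations above mirror the two passes that
    # _get_available_networks() makes: first the tenant's own (non-shared)
    # networks, then the shared ones. Both passes coming back empty means
    # there is nothing to allocate, hence the empty NetworkInfo result.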
@mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids') @mock.patch('nova.network.neutronv2.api.API._unbind_ports') def test_allocate_for_instance_ex1(self, mock_unbind, mock_preexisting): """verify we will delete created ports if we fail to allocate all net resources. Mox to raise exception when creating a second port. In this case, the code should delete the first created port. """ self.instance = fake_instance.fake_instance_obj(self.context, **self.instance) mock_preexisting.return_value = [] api = neutronapi.API() self.mox.StubOutWithMock(api, '_populate_neutron_extension_values') self.mox.StubOutWithMock(api, '_has_port_binding_extension') api._has_port_binding_extension(mox.IgnoreArg(), neutron=self.moxed_client, refresh_cache=True).AndReturn(False) requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id=net['id']) for net in (self.nets2[0], self.nets2[1])]) self.moxed_client.list_networks( id=['my_netid1', 'my_netid2']).AndReturn({'networks': self.nets2}) index = 0 for network in self.nets2: binding_port_req_body = { 'port': { 'device_id': self.instance.uuid, 'device_owner': 'compute:nova', }, } port_req_body = { 'port': { 'network_id': network['id'], 'admin_state_up': True, 'tenant_id': self.instance.project_id, }, } port_req_body['port'].update(binding_port_req_body['port']) port = {'id': 'portid_' + network['id']} api._populate_neutron_extension_values(self.context, self.instance, None, binding_port_req_body, neutron=self.moxed_client).AndReturn(None) if index == 0: self.moxed_client.create_port( MyComparator(port_req_body)).AndReturn({'port': port}) else: NeutronOverQuota = exceptions.OverQuotaClient() self.moxed_client.create_port( MyComparator(port_req_body)).AndRaise(NeutronOverQuota) index += 1 self.moxed_client.delete_port('portid_' + self.nets2[0]['id']) self.mox.ReplayAll() self.assertRaises(exception.PortLimitExceeded, api.allocate_for_instance, self.context, self.instance, requested_networks=requested_networks) mock_unbind.assert_called_once_with(self.context, [], self.moxed_client, mock.ANY) def test_allocate_for_instance_ex2(self): """verify we have no port to delete if we fail to allocate the first net resource. Mox to raise exception when creating the first port. In this case, the code should not delete any ports. 
""" self.instance = fake_instance.fake_instance_obj(self.context, **self.instance) api = neutronapi.API() self.mox.StubOutWithMock(api, '_populate_neutron_extension_values') self.mox.StubOutWithMock(api, '_has_port_binding_extension') api._has_port_binding_extension(mox.IgnoreArg(), neutron=self.moxed_client, refresh_cache=True).AndReturn(False) requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id=net['id']) for net in (self.nets2[0], self.nets2[1])]) self.moxed_client.list_networks( id=['my_netid1', 'my_netid2']).AndReturn({'networks': self.nets2}) binding_port_req_body = { 'port': { 'device_id': self.instance.uuid, 'device_owner': 'compute:nova', }, } port_req_body = { 'port': { 'network_id': self.nets2[0]['id'], 'admin_state_up': True, 'device_id': self.instance.uuid, 'tenant_id': self.instance.project_id, }, } api._populate_neutron_extension_values(self.context, self.instance, None, binding_port_req_body, neutron=self.moxed_client).AndReturn(None) self.moxed_client.create_port( MyComparator(port_req_body)).AndRaise( Exception("fail to create port")) self.mox.ReplayAll() self.assertRaises(NEUTRON_CLIENT_EXCEPTION, api.allocate_for_instance, self.context, self.instance, requested_networks=requested_networks) def test_allocate_for_instance_no_port_or_network(self): class BailOutEarly(Exception): pass self.instance = fake_instance.fake_instance_obj(self.context, **self.instance) api = neutronapi.API() self.moxed_client.list_extensions().AndReturn({'extensions': []}) self.mox.StubOutWithMock(api, '_get_available_networks') # Make sure we get an empty list and then bail out of the rest # of the function api._get_available_networks(self.context, self.instance.project_id, [], neutron=self.moxed_client).\ AndRaise(BailOutEarly) self.mox.ReplayAll() requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest()]) self.assertRaises(BailOutEarly, api.allocate_for_instance, self.context, self.instance, requested_networks=requested_networks) def test_allocate_for_instance_second_time(self): # Make sure that allocate_for_instance only returns ports that it # allocated during _that_ run. new_port = {'id': 'fake'} self._returned_nw_info = self.port_data1 + [new_port] nw_info = self._allocate_for_instance() self.assertEqual(nw_info, [new_port]) def test_allocate_for_instance_port_in_use(self): # If a port is already in use, an exception should be raised. requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id='my_portid1')]) api = self._stub_allocate_for_instance( requested_networks=requested_networks, _break='pre_list_networks', _device=True) self.assertRaises(exception.PortInUse, api.allocate_for_instance, self.context, self.instance, requested_networks=requested_networks) def test_allocate_for_instance_port_not_found(self): # If a port is not found, an exception should be raised. 
requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id='invalid_id')]) api = self._stub_allocate_for_instance( requested_networks=requested_networks, _break='pre_list_networks') self.assertRaises(exception.PortNotFound, api.allocate_for_instance, self.context, self.instance, requested_networks=requested_networks) def test_allocate_for_instance_port_invalid_tenantid(self): self.tenant_id = 'invalid_id' requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id='my_portid1')]) api = self._stub_allocate_for_instance( requested_networks=requested_networks, _break='pre_list_networks') self.assertRaises(exception.PortNotUsable, api.allocate_for_instance, self.context, self.instance, requested_networks=requested_networks) def test_allocate_for_instance_with_externalnet_forbidden(self): """Only one network is available, it's external, and the client is unauthorized to use it. """ self.instance = fake_instance.fake_instance_obj(self.context, **self.instance) self.moxed_client.list_extensions().AndReturn({'extensions': []}) # no networks in the tenant self.moxed_client.list_networks( tenant_id=self.instance.project_id, shared=False).AndReturn( {'networks': model.NetworkInfo([])}) # external network is shared self.moxed_client.list_networks(shared=True).AndReturn( {'networks': self.nets8}) self.mox.ReplayAll() api = neutronapi.API() self.assertRaises(exception.ExternalNetworkAttachForbidden, api.allocate_for_instance, self.context, self.instance) def test_allocate_for_instance_with_externalnet_multiple(self): """Multiple networks are available, one the client is authorized to use, and an external one the client is unauthorized to use. """ self.instance = fake_instance.fake_instance_obj(self.context, **self.instance) self.moxed_client.list_extensions().AndReturn({'extensions': []}) # network found in the tenant self.moxed_client.list_networks( tenant_id=self.instance.project_id, shared=False).AndReturn( {'networks': self.nets1}) # external network is shared self.moxed_client.list_networks(shared=True).AndReturn( {'networks': self.nets8}) self.mox.ReplayAll() api = neutronapi.API() self.assertRaises( exception.NetworkAmbiguous, api.allocate_for_instance, self.context, self.instance) def test_allocate_for_instance_with_externalnet_admin_ctx(self): """Only one network is available, it's external, and the client is authorized. """ admin_ctx = context.RequestContext('userid', 'my_tenantid', is_admin=True) api = self._stub_allocate_for_instance(net_idx=8) api.allocate_for_instance(admin_ctx, self.instance) def test_allocate_for_instance_with_external_shared_net(self): """Only one network is available, it's external and shared.""" ctx = context.RequestContext('userid', 'my_tenantid') api = self._stub_allocate_for_instance(net_idx=10) api.allocate_for_instance(ctx, self.instance) def _deallocate_for_instance(self, number, requested_networks=None): # TODO(mriedem): Remove this conversion when all neutronv2 APIs are # converted to handling instance objects. 
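        # The expected deallocation flow, mirrored by the mox expectations
        # below: list every port bound to the instance, update_port() the
        # pre-existing ports passed in via requested_networks to unbind
        # them, delete_port() the ports nova created itself, and finally
        # clear the instance's network info cache.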
        self.instance = fake_instance.fake_instance_obj(self.context,
                                                        **self.instance)
        api = neutronapi.API()
        port_data = number == 1 and self.port_data1 or self.port_data2
        ports = {port['id'] for port in port_data}
        ret_data = copy.deepcopy(port_data)
        if requested_networks:
            if isinstance(requested_networks, objects.NetworkRequestList):
                # NOTE(danms): Temporary and transitional
                with mock.patch('nova.utils.is_neutron', return_value=True):
                    requested_networks = requested_networks.as_tuples()
            for net, fip, port, request_id in requested_networks:
                ret_data.append({'network_id': net,
                                 'device_id': self.instance.uuid,
                                 'device_owner': 'compute:nova',
                                 'id': port,
                                 'status': 'DOWN',
                                 'admin_state_up': True,
                                 'fixed_ips': [],
                                 'mac_address': 'fake_mac', })
        self.moxed_client.list_ports(
            device_id=self.instance.uuid).AndReturn(
                {'ports': ret_data})
        self.moxed_client.list_extensions().AndReturn({'extensions': []})
        if requested_networks:
            for net, fip, port, request_id in requested_networks:
                self.moxed_client.update_port(port)
        for port in ports:
            self.moxed_client.delete_port(port)

        self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
        api.db.instance_info_cache_update(self.context,
                                          self.instance.uuid,
                                          {'network_info': '[]'}).AndReturn(
                                              fake_info_cache)
        self.mox.ReplayAll()

        api = neutronapi.API()
        api.deallocate_for_instance(self.context, self.instance,
                                    requested_networks=requested_networks)

    @mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids')
    def test_deallocate_for_instance_1_with_requested(self,
                                                      mock_preexisting):
        mock_preexisting.return_value = []
        requested = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(network_id='fake-net',
                                            address='1.2.3.4',
                                            port_id='fake-port')])
        # Test to deallocate in one port env.
        self._deallocate_for_instance(1, requested_networks=requested)

    @mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids')
    def test_deallocate_for_instance_2_with_requested(self,
                                                      mock_preexisting):
        mock_preexisting.return_value = []
        requested = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(network_id='fake-net',
                                            address='1.2.3.4',
                                            port_id='fake-port')])
        # Test to deallocate in a two port env.
        self._deallocate_for_instance(2, requested_networks=requested)

    @mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids')
    def test_deallocate_for_instance_1(self, mock_preexisting):
        mock_preexisting.return_value = []
        # Test to deallocate in one port env.
        self._deallocate_for_instance(1)

    @mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids')
    def test_deallocate_for_instance_2(self, mock_preexisting):
        mock_preexisting.return_value = []
        # Test to deallocate in two ports env.
        self._deallocate_for_instance(2)

    @mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids')
    def test_deallocate_for_instance_port_not_found(self,
                                                    mock_preexisting):
        # TODO(mriedem): Remove this conversion when all neutronv2 APIs are
        # converted to handling instance objects.
self.instance = fake_instance.fake_instance_obj(self.context, **self.instance) mock_preexisting.return_value = [] port_data = self.port_data1 self.moxed_client.list_ports( device_id=self.instance.uuid).AndReturn( {'ports': port_data}) self.moxed_client.list_extensions().AndReturn({'extensions': []}) NeutronNotFound = exceptions.NeutronClientException(status_code=404) for port in reversed(port_data): self.moxed_client.delete_port(port['id']).AndRaise( NeutronNotFound) self.mox.ReplayAll() api = neutronapi.API() api.deallocate_for_instance(self.context, self.instance) def _test_deallocate_port_for_instance(self, number): port_data = number == 1 and self.port_data1 or self.port_data2 nets = number == 1 and self.nets1 or self.nets2 self.moxed_client.delete_port(port_data[0]['id']) net_info_cache = [] for port in port_data: net_info_cache.append({"network": {"id": port['network_id']}, "id": port['id']}) self.instance['info_cache'] = self._fake_instance_info_cache( net_info_cache) api = neutronapi.API() neutronapi.get_client(mox.IgnoreArg(), admin=True).AndReturn( self.moxed_client) self.moxed_client.list_ports( tenant_id=self.instance['project_id'], device_id=self.instance['uuid']).AndReturn( {'ports': port_data[1:]}) neutronapi.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn( self.moxed_client) net_ids = [port['network_id'] for port in port_data] self.moxed_client.list_networks(id=net_ids).AndReturn( {'networks': nets}) float_data = number == 1 and self.float_data1 or self.float_data2 for data in port_data[1:]: for ip in data['fixed_ips']: self.moxed_client.list_floatingips( fixed_ip_address=ip['ip_address'], port_id=data['id']).AndReturn( {'floatingips': float_data[1:]}) for port in port_data[1:]: self.moxed_client.list_subnets(id=['my_subid2']).AndReturn({}) self.mox.ReplayAll() instance = self._fake_instance_object_with_info_cache(self.instance) nwinfo = api.deallocate_port_for_instance(self.context, instance, port_data[0]['id']) self.assertEqual(len(nwinfo), len(port_data[1:])) if len(port_data) > 1: self.assertEqual(nwinfo[0]['network']['id'], 'my_netid2') def test_deallocate_port_for_instance_1(self): # Test to deallocate the first and only port self._test_deallocate_port_for_instance(1) def test_deallocate_port_for_instance_2(self): # Test to deallocate the first port of two self._test_deallocate_port_for_instance(2) def test_list_ports(self): search_opts = {'parm': 'value'} self.moxed_client.list_ports(**search_opts) self.mox.ReplayAll() neutronapi.API().list_ports(self.context, **search_opts) def test_show_port(self): self.moxed_client.show_port('foo').AndReturn( {'port': self.port_data1[0]}) self.mox.ReplayAll() neutronapi.API().show_port(self.context, 'foo') def test_validate_networks(self): requested_networks = [('my_netid1', None, None, None), ('my_netid2', None, None, None)] ids = ['my_netid1', 'my_netid2'] self.moxed_client.list_networks( id=mox.SameElementsAs(ids)).AndReturn( {'networks': self.nets2}) self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn( {'ports': []}) self.moxed_client.show_quota( tenant_id='my_tenantid').AndReturn( {'quota': {'port': 50}}) self.mox.ReplayAll() api = neutronapi.API() api.validate_networks(self.context, requested_networks, 1) def test_validate_networks_without_port_quota_on_network_side(self): requested_networks = [('my_netid1', None, None, None), ('my_netid2', None, None, None)] ids = ['my_netid1', 'my_netid2'] self.moxed_client.list_networks( id=mox.SameElementsAs(ids)).AndReturn( {'networks': self.nets2}) 
self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn( {'ports': []}) self.moxed_client.show_quota( tenant_id='my_tenantid').AndReturn( {'quota': {}}) self.mox.ReplayAll() api = neutronapi.API() api.validate_networks(self.context, requested_networks, 1) def test_validate_networks_ex_1(self): requested_networks = [('my_netid1', None, None, None)] self.moxed_client.list_networks( id=mox.SameElementsAs(['my_netid1'])).AndReturn( {'networks': self.nets1}) self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn( {'ports': []}) self.moxed_client.show_quota( tenant_id='my_tenantid').AndReturn( {'quota': {'port': 50}}) self.mox.ReplayAll() api = neutronapi.API() try: api.validate_networks(self.context, requested_networks, 1) except exception.NetworkNotFound as ex: self.assertIn("my_netid2", six.text_type(ex)) def test_validate_networks_ex_2(self): requested_networks = [('my_netid1', None, None, None), ('my_netid2', None, None, None), ('my_netid3', None, None, None)] ids = ['my_netid1', 'my_netid2', 'my_netid3'] self.moxed_client.list_networks( id=mox.SameElementsAs(ids)).AndReturn( {'networks': self.nets1}) self.mox.ReplayAll() api = neutronapi.API() try: api.validate_networks(self.context, requested_networks, 1) except exception.NetworkNotFound as ex: self.assertIn("my_netid2", six.text_type(ex)) self.assertIn("my_netid3", six.text_type(ex)) def test_validate_networks_duplicate_enable(self): # Verify that no duplicateNetworks exception is thrown when duplicate # network ids are passed to validate_networks. requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id='my_netid1'), objects.NetworkRequest(network_id='my_netid1')]) ids = ['my_netid1', 'my_netid1'] self.moxed_client.list_networks( id=mox.SameElementsAs(ids)).AndReturn( {'networks': self.nets1}) self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn( {'ports': []}) self.moxed_client.show_quota( tenant_id='my_tenantid').AndReturn( {'quota': {'port': 50}}) self.mox.ReplayAll() api = neutronapi.API() api.validate_networks(self.context, requested_networks, 1) def test_allocate_for_instance_with_requested_networks_duplicates(self): # specify a duplicate network to allocate to instance requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id=net['id']) for net in (self.nets6[0], self.nets6[1])]) self._allocate_for_instance(net_idx=6, requested_networks=requested_networks) def test_allocate_for_instance_requested_networks_duplicates_port(self): # specify first port and last port that are in same network requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id=port['id']) for port in (self.port_data1[0], self.port_data3[0])]) self._allocate_for_instance(net_idx=6, requested_networks=requested_networks) def test_allocate_for_instance_requested_networks_duplicates_combo(self): # specify a combo net_idx=7 : net2, port in net1, net2, port in net1 requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id='my_netid2'), objects.NetworkRequest(port_id=self.port_data1[0]['id']), objects.NetworkRequest(network_id='my_netid2'), objects.NetworkRequest(port_id=self.port_data3[0]['id'])]) self._allocate_for_instance(net_idx=7, requested_networks=requested_networks) def test_validate_networks_not_specified(self): requested_networks = objects.NetworkRequestList(objects=[]) self.moxed_client.list_networks( tenant_id=self.context.project_id, shared=False).AndReturn( {'networks': self.nets1}) 
        self.moxed_client.list_networks(
            shared=True).AndReturn(
                {'networks': self.nets2})
        self.mox.ReplayAll()
        api = neutronapi.API()
        self.assertRaises(exception.NetworkAmbiguous,
                          api.validate_networks,
                          self.context, requested_networks, 1)

    def test_validate_networks_port_not_found(self):
        # Verify that the correct exception is thrown when a non-existent
        # port is passed to validate_networks.
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(
                network_id='my_netid1',
                port_id='3123-ad34-bc43-32332ca33e')])

        PortNotFound = exceptions.PortNotFoundClient()
        self.moxed_client.show_port(requested_networks[0].port_id).AndRaise(
            PortNotFound)
        self.mox.ReplayAll()
        # Expected call from setUp.
        neutronapi.get_client(None)
        api = neutronapi.API()
        self.assertRaises(exception.PortNotFound,
                          api.validate_networks,
                          self.context, requested_networks, 1)

    def test_validate_networks_port_show_raises_non404(self):
        # Verify that a non-404 NeutronClientException raised by show_port
        # is surfaced as a NovaException by validate_networks.
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(
                network_id='my_netid1',
                port_id='3123-ad34-bc43-32332ca33e')])

        NeutronNotFound = exceptions.NeutronClientException(status_code=0)
        self.moxed_client.show_port(requested_networks[0].port_id).AndRaise(
            NeutronNotFound)
        self.mox.ReplayAll()
        # Expected call from setUp.
        neutronapi.get_client(None)
        api = neutronapi.API()
        self.assertRaises(exception.NovaException,
                          api.validate_networks,
                          self.context, requested_networks, 1)

    def test_validate_networks_port_in_use(self):
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(
                port_id=self.port_data3[0]['id'])])
        self.moxed_client.show_port(self.port_data3[0]['id']).\
            AndReturn({'port': self.port_data3[0]})
        self.mox.ReplayAll()
        api = neutronapi.API()
        self.assertRaises(exception.PortInUse,
                          api.validate_networks,
                          self.context, requested_networks, 1)

    def test_validate_networks_port_no_subnet_id(self):
        port_a = self.port_data3[0]
        port_a['device_id'] = None
        port_a['device_owner'] = None

        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(port_id=port_a['id'])])
        self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
        self.mox.ReplayAll()

        api = neutronapi.API()
        self.assertRaises(exception.PortRequiresFixedIP,
                          api.validate_networks,
                          self.context, requested_networks, 1)

    def test_validate_networks_no_subnet_id(self):
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(network_id='his_netid4')])
        ids = ['his_netid4']
        self.moxed_client.list_networks(
            id=mox.SameElementsAs(ids)).AndReturn(
                {'networks': self.nets4})
        self.mox.ReplayAll()
        api = neutronapi.API()
        self.assertRaises(exception.NetworkRequiresSubnet,
                          api.validate_networks,
                          self.context, requested_networks, 1)

    def test_validate_networks_ports_in_same_network_enable(self):
        # Verify that duplicateNetworks exception is not thrown when ports
        # on the same duplicate network are passed to validate_networks.
port_a = self.port_data3[0] port_a['fixed_ips'] = {'ip_address': '10.0.0.2', 'subnet_id': 'subnet_id'} port_b = self.port_data1[0] self.assertEqual(port_a['network_id'], port_b['network_id']) for port in [port_a, port_b]: port['device_id'] = None port['device_owner'] = None requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id=port_a['id']), objects.NetworkRequest(port_id=port_b['id'])]) self.moxed_client.show_port(port_a['id']).AndReturn( {'port': port_a}) self.moxed_client.show_port(port_b['id']).AndReturn( {'port': port_b}) self.mox.ReplayAll() api = neutronapi.API() api.validate_networks(self.context, requested_networks, 1) def test_validate_networks_ports_not_in_same_network(self): port_a = self.port_data3[0] port_a['fixed_ips'] = {'ip_address': '10.0.0.2', 'subnet_id': 'subnet_id'} port_b = self.port_data2[1] self.assertNotEqual(port_a['network_id'], port_b['network_id']) for port in [port_a, port_b]: port['device_id'] = None port['device_owner'] = None requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id=port_a['id']), objects.NetworkRequest(port_id=port_b['id'])]) self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a}) self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b}) self.mox.ReplayAll() api = neutronapi.API() api.validate_networks(self.context, requested_networks, 1) def test_validate_networks_no_quota(self): # Test validation for a request for one instance needing # two ports, where the quota is 2 and 2 ports are in use # => instances which can be created = 0 requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id='my_netid1'), objects.NetworkRequest(network_id='my_netid2')]) ids = ['my_netid1', 'my_netid2'] self.moxed_client.list_networks( id=mox.SameElementsAs(ids)).AndReturn( {'networks': self.nets2}) self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn( {'ports': self.port_data2}) self.moxed_client.show_quota( tenant_id='my_tenantid').AndReturn( {'quota': {'port': 2}}) self.mox.ReplayAll() api = neutronapi.API() max_count = api.validate_networks(self.context, requested_networks, 1) self.assertEqual(max_count, 0) def test_validate_networks_with_ports_and_networks(self): # Test validation for a request for one instance needing # one port allocated via nova with another port being passed in. port_b = self.port_data2[1] port_b['device_id'] = None port_b['device_owner'] = None requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id='my_netid1'), objects.NetworkRequest(port_id=port_b['id'])]) self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b}) ids = ['my_netid1'] self.moxed_client.list_networks( id=mox.SameElementsAs(ids)).AndReturn( {'networks': self.nets1}) self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn( {'ports': self.port_data2}) self.moxed_client.show_quota( tenant_id='my_tenantid').AndReturn( {'quota': {'port': 5}}) self.mox.ReplayAll() api = neutronapi.API() max_count = api.validate_networks(self.context, requested_networks, 1) self.assertEqual(max_count, 1) def test_validate_networks_one_port_and_no_networks(self): # Test that show quota is not called if no networks are # passed in and only ports. 
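        # The quota arithmetic exercised by the neighboring *_quota tests is
        # approximately (a sketch, not the actual implementation):
        #
        #     if quota['port'] == -1:          # unlimited
        #         return num_instances
        #     free = quota['port'] - len(ports_in_use)
        #     return min(num_instances, free // ports_needed_per_instance)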
port_b = self.port_data2[1] port_b['device_id'] = None port_b['device_owner'] = None requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id=port_b['id'])]) self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b}) self.mox.ReplayAll() api = neutronapi.API() max_count = api.validate_networks(self.context, requested_networks, 1) self.assertEqual(max_count, 1) def test_validate_networks_some_quota(self): # Test validation for a request for two instance needing # two ports each, where the quota is 5 and 2 ports are in use # => instances which can be created = 1 requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id='my_netid1'), objects.NetworkRequest(network_id='my_netid2')]) ids = ['my_netid1', 'my_netid2'] self.moxed_client.list_networks( id=mox.SameElementsAs(ids)).AndReturn( {'networks': self.nets2}) self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn( {'ports': self.port_data2}) self.moxed_client.show_quota( tenant_id='my_tenantid').AndReturn( {'quota': {'port': 5}}) self.mox.ReplayAll() api = neutronapi.API() max_count = api.validate_networks(self.context, requested_networks, 2) self.assertEqual(max_count, 1) def test_validate_networks_unlimited_quota(self): # Test validation for a request for two instance needing # two ports each, where the quota is -1 (unlimited) # => instances which can be created = 1 requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id='my_netid1'), objects.NetworkRequest(network_id='my_netid2')]) ids = ['my_netid1', 'my_netid2'] self.moxed_client.list_networks( id=mox.SameElementsAs(ids)).AndReturn( {'networks': self.nets2}) self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn( {'ports': self.port_data2}) self.moxed_client.show_quota( tenant_id='my_tenantid').AndReturn( {'quota': {'port': -1}}) self.mox.ReplayAll() api = neutronapi.API() max_count = api.validate_networks(self.context, requested_networks, 2) self.assertEqual(max_count, 2) def test_validate_networks_no_quota_but_ports_supplied(self): port_a = self.port_data3[0] port_a['fixed_ips'] = {'ip_address': '10.0.0.2', 'subnet_id': 'subnet_id'} port_b = self.port_data2[1] self.assertNotEqual(port_a['network_id'], port_b['network_id']) for port in [port_a, port_b]: port['device_id'] = None port['device_owner'] = None requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id=port_a['id']), objects.NetworkRequest(port_id=port_b['id'])]) self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a}) self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b}) self.mox.ReplayAll() api = neutronapi.API() max_count = api.validate_networks(self.context, requested_networks, 1) self.assertEqual(max_count, 1) def _mock_list_ports(self, port_data=None): if port_data is None: port_data = self.port_data2 address = self.port_address self.moxed_client.list_ports( fixed_ips=MyComparator('ip_address=%s' % address)).AndReturn( {'ports': port_data}) self.mox.ReplayAll() return address def test_get_fixed_ip_by_address_fails_for_no_ports(self): address = self._mock_list_ports(port_data=[]) api = neutronapi.API() self.assertRaises(exception.FixedIpNotFoundForAddress, api.get_fixed_ip_by_address, self.context, address) def test_get_fixed_ip_by_address_succeeds_for_1_port(self): address = self._mock_list_ports(port_data=self.port_data1) api = neutronapi.API() result = api.get_fixed_ip_by_address(self.context, address) 
self.assertEqual(self.instance2['uuid'], result['instance_uuid']) def test_get_fixed_ip_by_address_fails_for_more_than_1_port(self): address = self._mock_list_ports() api = neutronapi.API() self.assertRaises(exception.FixedIpAssociatedWithMultipleInstances, api.get_fixed_ip_by_address, self.context, address) def _get_available_networks(self, prv_nets, pub_nets, req_ids=None, context=None): api = neutronapi.API() nets = prv_nets + pub_nets if req_ids: mox_list_params = {'id': req_ids} self.moxed_client.list_networks( **mox_list_params).AndReturn({'networks': nets}) else: mox_list_params = {'tenant_id': self.instance['project_id'], 'shared': False} self.moxed_client.list_networks( **mox_list_params).AndReturn({'networks': prv_nets}) mox_list_params = {'shared': True} self.moxed_client.list_networks( **mox_list_params).AndReturn({'networks': pub_nets}) self.mox.ReplayAll() rets = api._get_available_networks( context if context else self.context, self.instance['project_id'], req_ids) self.assertEqual(rets, nets) def test_get_available_networks_all_private(self): self._get_available_networks(prv_nets=self.nets2, pub_nets=[]) def test_get_available_networks_all_public(self): self._get_available_networks(prv_nets=[], pub_nets=self.nets2) def test_get_available_networks_private_and_public(self): self._get_available_networks(prv_nets=self.nets1, pub_nets=self.nets4) def test_get_available_networks_with_network_ids(self): prv_nets = [self.nets3[0]] pub_nets = [self.nets3[-1]] # specify only first and last network req_ids = [net['id'] for net in (self.nets3[0], self.nets3[-1])] self._get_available_networks(prv_nets, pub_nets, req_ids) def test_get_available_networks_with_custom_policy(self): rules = {'network:attach_external_network': common_policy.parse_rule('')} policy.set_rules(rules) req_ids = [net['id'] for net in self.nets5] self._get_available_networks(self.nets5, pub_nets=[], req_ids=req_ids) def test_get_floating_ip_pools(self): api = neutronapi.API() search_opts = {'router:external': True} self.moxed_client.list_networks(**search_opts).\ AndReturn({'networks': [self.fip_pool, self.fip_pool_nova]}) self.mox.ReplayAll() pools = api.get_floating_ip_pools(self.context) expected = [self.fip_pool['name'], self.fip_pool_nova['name']] self.assertEqual(expected, pools) def _get_expected_fip_model(self, fip_data, idx=0): expected = {'id': fip_data['id'], 'address': fip_data['floating_ip_address'], 'pool': self.fip_pool['name'], 'project_id': fip_data['tenant_id'], 'fixed_ip_id': fip_data['port_id'], 'fixed_ip': {'address': fip_data['fixed_ip_address']}, 'instance': ({'uuid': self.port_data2[idx]['device_id']} if fip_data['port_id'] else None)} if expected['instance'] is not None: expected['fixed_ip']['instance_uuid'] = \ expected['instance']['uuid'] return expected def _test_get_floating_ip(self, fip_data, idx=0, by_address=False): api = neutronapi.API() fip_id = fip_data['id'] net_id = fip_data['floating_network_id'] address = fip_data['floating_ip_address'] if by_address: self.moxed_client.list_floatingips(floating_ip_address=address).\ AndReturn({'floatingips': [fip_data]}) else: self.moxed_client.show_floatingip(fip_id).\ AndReturn({'floatingip': fip_data}) self.moxed_client.show_network(net_id).\ AndReturn({'network': self.fip_pool}) if fip_data['port_id']: self.moxed_client.show_port(fip_data['port_id']).\ AndReturn({'port': self.port_data2[idx]}) self.mox.ReplayAll() expected = self._get_expected_fip_model(fip_data, idx) if by_address: fip = api.get_floating_ip_by_address(self.context, 
address) else: fip = api.get_floating_ip(self.context, fip_id) self.assertEqual(expected, fip) def test_get_floating_ip_unassociated(self): self._test_get_floating_ip(self.fip_unassociated, idx=0) def test_get_floating_ip_associated(self): self._test_get_floating_ip(self.fip_associated, idx=1) def test_get_floating_ip_by_address(self): self._test_get_floating_ip(self.fip_unassociated, idx=0, by_address=True) def test_get_floating_ip_by_address_associated(self): self._test_get_floating_ip(self.fip_associated, idx=1, by_address=True) def test_get_floating_ip_by_address_not_found(self): api = neutronapi.API() address = self.fip_unassociated['floating_ip_address'] self.moxed_client.list_floatingips(floating_ip_address=address).\ AndReturn({'floatingips': []}) self.mox.ReplayAll() self.assertRaises(exception.FloatingIpNotFoundForAddress, api.get_floating_ip_by_address, self.context, address) def test_get_floating_ip_by_id_not_found(self): api = neutronapi.API() NeutronNotFound = exceptions.NeutronClientException(status_code=404) floating_ip_id = self.fip_unassociated['id'] self.moxed_client.show_floatingip(floating_ip_id).\ AndRaise(NeutronNotFound) self.mox.ReplayAll() self.assertRaises(exception.FloatingIpNotFound, api.get_floating_ip, self.context, floating_ip_id) def test_get_floating_ip_raises_non404(self): api = neutronapi.API() NeutronNotFound = exceptions.NeutronClientException(status_code=0) floating_ip_id = self.fip_unassociated['id'] self.moxed_client.show_floatingip(floating_ip_id).\ AndRaise(NeutronNotFound) self.mox.ReplayAll() self.assertRaises(exceptions.NeutronClientException, api.get_floating_ip, self.context, floating_ip_id) def test_get_floating_ip_by_address_multiple_found(self): api = neutronapi.API() address = self.fip_unassociated['floating_ip_address'] self.moxed_client.list_floatingips(floating_ip_address=address).\ AndReturn({'floatingips': [self.fip_unassociated] * 2}) self.mox.ReplayAll() self.assertRaises(exception.FloatingIpMultipleFoundForAddress, api.get_floating_ip_by_address, self.context, address) def test_get_floating_ips_by_project(self): api = neutronapi.API() project_id = self.context.project_id self.moxed_client.list_floatingips(tenant_id=project_id).\ AndReturn({'floatingips': [self.fip_unassociated, self.fip_associated]}) search_opts = {'router:external': True} self.moxed_client.list_networks(**search_opts).\ AndReturn({'networks': [self.fip_pool, self.fip_pool_nova]}) self.moxed_client.list_ports(tenant_id=project_id).\ AndReturn({'ports': self.port_data2}) self.mox.ReplayAll() expected = [self._get_expected_fip_model(self.fip_unassociated), self._get_expected_fip_model(self.fip_associated, idx=1)] fips = api.get_floating_ips_by_project(self.context) self.assertEqual(expected, fips) def _test_get_instance_id_by_floating_address(self, fip_data, associated=False): api = neutronapi.API() address = fip_data['floating_ip_address'] self.moxed_client.list_floatingips(floating_ip_address=address).\ AndReturn({'floatingips': [fip_data]}) if associated: self.moxed_client.show_port(fip_data['port_id']).\ AndReturn({'port': self.port_data2[1]}) self.mox.ReplayAll() if associated: expected = self.port_data2[1]['device_id'] else: expected = None fip = api.get_instance_id_by_floating_address(self.context, address) self.assertEqual(expected, fip) def test_get_instance_id_by_floating_address(self): self._test_get_instance_id_by_floating_address(self.fip_unassociated) def test_get_instance_id_by_floating_address_associated(self): 
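        # For an associated floating IP the instance id comes from the
        # device_id of the port the floating IP points at, which the helper
        # above resolves via show_port().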
self._test_get_instance_id_by_floating_address(self.fip_associated, associated=True) def test_allocate_floating_ip(self): api = neutronapi.API() pool_name = self.fip_pool['name'] pool_id = self.fip_pool['id'] search_opts = {'router:external': True, 'fields': 'id', 'name': pool_name} self.moxed_client.list_networks(**search_opts).\ AndReturn({'networks': [self.fip_pool]}) self.moxed_client.create_floatingip( {'floatingip': {'floating_network_id': pool_id}}).\ AndReturn({'floatingip': self.fip_unassociated}) self.mox.ReplayAll() fip = api.allocate_floating_ip(self.context, 'ext_net') self.assertEqual(fip, self.fip_unassociated['floating_ip_address']) def test_allocate_floating_ip_addr_gen_fail(self): api = neutronapi.API() pool_name = self.fip_pool['name'] pool_id = self.fip_pool['id'] search_opts = {'router:external': True, 'fields': 'id', 'name': pool_name} self.moxed_client.list_networks(**search_opts).\ AndReturn({'networks': [self.fip_pool]}) self.moxed_client.create_floatingip( {'floatingip': {'floating_network_id': pool_id}}).\ AndRaise(exceptions.IpAddressGenerationFailureClient) self.mox.ReplayAll() self.assertRaises(exception.NoMoreFloatingIps, api.allocate_floating_ip, self.context, 'ext_net') def test_allocate_floating_ip_exhausted_fail(self): api = neutronapi.API() pool_name = self.fip_pool['name'] pool_id = self.fip_pool['id'] search_opts = {'router:external': True, 'fields': 'id', 'name': pool_name} self.moxed_client.list_networks(**search_opts).\ AndReturn({'networks': [self.fip_pool]}) self.moxed_client.create_floatingip( {'floatingip': {'floating_network_id': pool_id}}).\ AndRaise(exceptions.ExternalIpAddressExhaustedClient) self.mox.ReplayAll() self.assertRaises(exception.NoMoreFloatingIps, api.allocate_floating_ip, self.context, 'ext_net') def test_allocate_floating_ip_with_pool_id(self): api = neutronapi.API() pool_id = self.fip_pool['id'] search_opts = {'router:external': True, 'fields': 'id', 'id': pool_id} self.moxed_client.list_networks(**search_opts).\ AndReturn({'networks': [self.fip_pool]}) self.moxed_client.create_floatingip( {'floatingip': {'floating_network_id': pool_id}}).\ AndReturn({'floatingip': self.fip_unassociated}) self.mox.ReplayAll() fip = api.allocate_floating_ip(self.context, pool_id) self.assertEqual(fip, self.fip_unassociated['floating_ip_address']) def test_allocate_floating_ip_with_default_pool(self): api = neutronapi.API() pool_name = self.fip_pool_nova['name'] pool_id = self.fip_pool_nova['id'] search_opts = {'router:external': True, 'fields': 'id', 'name': pool_name} self.moxed_client.list_networks(**search_opts).\ AndReturn({'networks': [self.fip_pool_nova]}) self.moxed_client.create_floatingip( {'floatingip': {'floating_network_id': pool_id}}).\ AndReturn({'floatingip': self.fip_unassociated}) self.mox.ReplayAll() fip = api.allocate_floating_ip(self.context) self.assertEqual(fip, self.fip_unassociated['floating_ip_address']) def test_release_floating_ip(self): api = neutronapi.API() address = self.fip_unassociated['floating_ip_address'] fip_id = self.fip_unassociated['id'] self.moxed_client.list_floatingips(floating_ip_address=address).\ AndReturn({'floatingips': [self.fip_unassociated]}) self.moxed_client.delete_floatingip(fip_id) self.mox.ReplayAll() api.release_floating_ip(self.context, address) def test_disassociate_and_release_floating_ip(self): api = neutronapi.API() address = self.fip_unassociated['floating_ip_address'] fip_id = self.fip_unassociated['id'] floating_ip = {'address': address} 
self.moxed_client.list_floatingips(floating_ip_address=address).\ AndReturn({'floatingips': [self.fip_unassociated]}) self.moxed_client.delete_floatingip(fip_id) self.mox.ReplayAll() api.disassociate_and_release_floating_ip(self.context, None, floating_ip) def test_release_floating_ip_associated(self): api = neutronapi.API() address = self.fip_associated['floating_ip_address'] self.moxed_client.list_floatingips(floating_ip_address=address).\ AndReturn({'floatingips': [self.fip_associated]}) self.mox.ReplayAll() self.assertRaises(exception.FloatingIpAssociated, api.release_floating_ip, self.context, address) def _setup_mock_for_refresh_cache(self, api, instances): nw_info = model.NetworkInfo() self.mox.StubOutWithMock(api, '_get_instance_nw_info') self.mox.StubOutWithMock(api.db, 'instance_info_cache_update') for instance in instances: api._get_instance_nw_info(mox.IgnoreArg(), instance).\ AndReturn(nw_info) api.db.instance_info_cache_update(mox.IgnoreArg(), instance['uuid'], mox.IgnoreArg()).AndReturn( fake_info_cache) def test_associate_floating_ip(self): api = neutronapi.API() address = self.fip_unassociated['floating_ip_address'] fixed_address = self.port_address2 fip_id = self.fip_unassociated['id'] instance = self._fake_instance_object(self.instance) search_opts = {'device_owner': 'compute:nova', 'device_id': instance.uuid} self.moxed_client.list_ports(**search_opts).\ AndReturn({'ports': [self.port_data2[1]]}) self.moxed_client.list_floatingips(floating_ip_address=address).\ AndReturn({'floatingips': [self.fip_unassociated]}) self.moxed_client.update_floatingip( fip_id, {'floatingip': {'port_id': self.fip_associated['port_id'], 'fixed_ip_address': fixed_address}}) self._setup_mock_for_refresh_cache(api, [instance]) self.mox.ReplayAll() api.associate_floating_ip(self.context, instance, address, fixed_address) @mock.patch('nova.objects.Instance.get_by_uuid') def test_reassociate_floating_ip(self, mock_get): api = neutronapi.API() address = self.fip_associated['floating_ip_address'] new_fixed_address = self.port_address fip_id = self.fip_associated['id'] search_opts = {'device_owner': 'compute:nova', 'device_id': self.instance2['uuid']} self.moxed_client.list_ports(**search_opts).\ AndReturn({'ports': [self.port_data2[0]]}) self.moxed_client.list_floatingips(floating_ip_address=address).\ AndReturn({'floatingips': [self.fip_associated]}) self.moxed_client.update_floatingip( fip_id, {'floatingip': {'port_id': 'my_portid1', 'fixed_ip_address': new_fixed_address}}) self.moxed_client.show_port(self.fip_associated['port_id']).\ AndReturn({'port': self.port_data2[1]}) mock_get.return_value = fake_instance.fake_instance_obj( self.context, **self.instance) instance2 = self._fake_instance_object(self.instance2) self._setup_mock_for_refresh_cache(api, [mock_get.return_value, instance2]) self.mox.ReplayAll() api.associate_floating_ip(self.context, instance2, address, new_fixed_address) def test_associate_floating_ip_not_found_fixed_ip(self): instance = self._fake_instance_object(self.instance) api = neutronapi.API() address = self.fip_associated['floating_ip_address'] fixed_address = self.fip_associated['fixed_ip_address'] search_opts = {'device_owner': 'compute:nova', 'device_id': self.instance['uuid']} self.moxed_client.list_ports(**search_opts).\ AndReturn({'ports': [self.port_data2[0]]}) self.mox.ReplayAll() self.assertRaises(exception.FixedIpNotFoundForAddress, api.associate_floating_ip, self.context, instance, address, fixed_address) def test_disassociate_floating_ip(self): instance = 
self._fake_instance_object(self.instance) api = neutronapi.API() address = self.fip_associated['floating_ip_address'] fip_id = self.fip_associated['id'] self.moxed_client.list_floatingips(floating_ip_address=address).\ AndReturn({'floatingips': [self.fip_associated]}) self.moxed_client.update_floatingip( fip_id, {'floatingip': {'port_id': None}}) self._setup_mock_for_refresh_cache(api, [instance]) self.mox.ReplayAll() api.disassociate_floating_ip(self.context, instance, address) def test_add_fixed_ip_to_instance(self): instance = self._fake_instance_object(self.instance) api = neutronapi.API() self._setup_mock_for_refresh_cache(api, [instance]) network_id = 'my_netid1' search_opts = {'network_id': network_id} self.moxed_client.list_subnets( **search_opts).AndReturn({'subnets': self.subnet_data_n}) search_opts = {'device_id': instance.uuid, 'device_owner': 'compute:nova', 'network_id': network_id} self.moxed_client.list_ports( **search_opts).AndReturn({'ports': self.port_data1}) port_req_body = { 'port': { 'fixed_ips': [{'subnet_id': 'my_subid1'}, {'subnet_id': 'my_subid1'}], }, } port = self.port_data1[0] port['fixed_ips'] = [{'subnet_id': 'my_subid1'}] self.moxed_client.update_port('my_portid1', MyComparator(port_req_body)).AndReturn({'port': port}) self.mox.ReplayAll() api.add_fixed_ip_to_instance(self.context, instance, network_id) def test_remove_fixed_ip_from_instance(self): instance = self._fake_instance_object(self.instance) api = neutronapi.API() self._setup_mock_for_refresh_cache(api, [instance]) address = '10.0.0.3' zone = 'compute:%s' % self.instance['availability_zone'] search_opts = {'device_id': self.instance['uuid'], 'device_owner': zone, 'fixed_ips': 'ip_address=%s' % address} self.moxed_client.list_ports( **search_opts).AndReturn({'ports': self.port_data1}) port_req_body = { 'port': { 'fixed_ips': [], }, } port = self.port_data1[0] port['fixed_ips'] = [] self.moxed_client.update_port('my_portid1', MyComparator(port_req_body)).AndReturn({'port': port}) self.mox.ReplayAll() api.remove_fixed_ip_from_instance(self.context, instance, address) def test_list_floating_ips_without_l3_support(self): api = neutronapi.API() NeutronNotFound = exceptions.NeutronClientException( status_code=404) self.moxed_client.list_floatingips( fixed_ip_address='1.1.1.1', port_id=1).AndRaise(NeutronNotFound) self.mox.ReplayAll() neutronapi.get_client('fake') floatingips = api._get_floating_ips_by_fixed_and_port( self.moxed_client, '1.1.1.1', 1) self.assertEqual(floatingips, []) def test_nw_info_get_ips(self): fake_port = { 'fixed_ips': [ {'ip_address': '1.1.1.1'}], 'id': 'port-id', } api = neutronapi.API() self.mox.StubOutWithMock(api, '_get_floating_ips_by_fixed_and_port') api._get_floating_ips_by_fixed_and_port( self.moxed_client, '1.1.1.1', 'port-id').AndReturn( [{'floating_ip_address': '10.0.0.1'}]) self.mox.ReplayAll() neutronapi.get_client('fake') result = api._nw_info_get_ips(self.moxed_client, fake_port) self.assertEqual(len(result), 1) self.assertEqual(result[0]['address'], '1.1.1.1') self.assertEqual(result[0]['floating_ips'][0]['address'], '10.0.0.1') def test_nw_info_get_subnets(self): fake_port = { 'fixed_ips': [ {'ip_address': '1.1.1.1'}, {'ip_address': '2.2.2.2'}], 'id': 'port-id', } fake_subnet = model.Subnet(cidr='1.0.0.0/8') fake_ips = [model.IP(x['ip_address']) for x in fake_port['fixed_ips']] api = neutronapi.API() self.mox.StubOutWithMock(api, '_get_subnets_from_port') api._get_subnets_from_port(self.context, fake_port).AndReturn( [fake_subnet]) self.mox.ReplayAll() 
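        # Only fixed IPs that fall inside a returned subnet's CIDR should be
        # attached to that subnet: 1.1.1.1 is within 1.0.0.0/8 while 2.2.2.2
        # is not, hence the single-address assertions below.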
        neutronapi.get_client('fake')
        subnets = api._nw_info_get_subnets(self.context, fake_port, fake_ips)
        self.assertEqual(len(subnets), 1)
        self.assertEqual(len(subnets[0]['ips']), 1)
        self.assertEqual(subnets[0]['ips'][0]['address'], '1.1.1.1')

    def _test_nw_info_build_network(self, vif_type):
        fake_port = {
            'fixed_ips': [{'ip_address': '1.1.1.1'}],
            'id': 'port-id',
            'network_id': 'net-id',
            'binding:vif_type': vif_type,
            }
        fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
        fake_nets = [{'id': 'net-id', 'name': 'foo', 'tenant_id': 'tenant'}]
        api = neutronapi.API()
        self.mox.ReplayAll()
        neutronapi.get_client('fake')
        net, iid = api._nw_info_build_network(fake_port, fake_nets,
                                              fake_subnets)
        self.assertEqual(net['subnets'], fake_subnets)
        self.assertEqual(net['id'], 'net-id')
        self.assertEqual(net['label'], 'foo')
        self.assertEqual(net.get_meta('tenant_id'), 'tenant')
        self.assertEqual(net.get_meta('injected'), CONF.flat_injected)
        return net, iid

    def test_nw_info_build_network_ovs(self):
        net, iid = self._test_nw_info_build_network(model.VIF_TYPE_OVS)
        self.assertEqual(net['bridge'], CONF.neutron.ovs_bridge)
        self.assertNotIn('should_create_bridge', net)
        self.assertEqual(iid, 'port-id')

    def test_nw_info_build_network_dvs(self):
        net, iid = self._test_nw_info_build_network(model.VIF_TYPE_DVS)
        self.assertEqual('net-id', net['bridge'])
        self.assertNotIn('should_create_bridge', net)
        self.assertNotIn('ovs_interfaceid', net)
        self.assertIsNone(iid)

    def test_nw_info_build_network_bridge(self):
        net, iid = self._test_nw_info_build_network(model.VIF_TYPE_BRIDGE)
        self.assertEqual(net['bridge'], 'brqnet-id')
        self.assertTrue(net['should_create_bridge'])
        self.assertIsNone(iid)

    def test_nw_info_build_network_tap(self):
        net, iid = self._test_nw_info_build_network(model.VIF_TYPE_TAP)
        self.assertIsNone(net['bridge'])
        self.assertNotIn('should_create_bridge', net)
        self.assertIsNone(iid)

    def test_nw_info_build_network_other(self):
        net, iid = self._test_nw_info_build_network(None)
        self.assertIsNone(net['bridge'])
        self.assertNotIn('should_create_bridge', net)
        self.assertIsNone(iid)

    def test_nw_info_build_no_match(self):
        fake_port = {
            'fixed_ips': [{'ip_address': '1.1.1.1'}],
            'id': 'port-id',
            'network_id': 'net-id1',
            'tenant_id': 'tenant',
            'binding:vif_type': model.VIF_TYPE_OVS,
            }
        fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
        fake_nets = [{'id': 'net-id2', 'name': 'foo', 'tenant_id': 'tenant'}]
        api = neutronapi.API()
        self.mox.ReplayAll()
        neutronapi.get_client('fake')
        net, iid = api._nw_info_build_network(fake_port, fake_nets,
                                              fake_subnets)
        self.assertEqual(fake_subnets, net['subnets'])
        self.assertEqual('net-id1', net['id'])
        self.assertEqual('tenant', net['meta']['tenant_id'])

    def test_nw_info_build_network_vhostuser(self):
        fake_port = {
            'fixed_ips': [{'ip_address': '1.1.1.1'}],
            'id': 'port-id',
            'network_id': 'net-id',
            'binding:vif_type': model.VIF_TYPE_VHOSTUSER,
            'binding:vif_details': {
                    model.VIF_DETAILS_VHOSTUSER_OVS_PLUG: True
                }
            }
        fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
        fake_nets = [{'id': 'net-id', 'name': 'foo', 'tenant_id': 'tenant'}]
        api = neutronapi.API()
        self.mox.ReplayAll()
        neutronapi.get_client('fake')
        net, iid = api._nw_info_build_network(fake_port, fake_nets,
                                              fake_subnets)
        self.assertEqual(net['subnets'], fake_subnets)
        self.assertEqual(net['id'], 'net-id')
        self.assertEqual(net['label'], 'foo')
        self.assertEqual(net.get_meta('tenant_id'), 'tenant')
        self.assertEqual(net.get_meta('injected'), CONF.flat_injected)
        self.assertEqual(net['bridge'], CONF.neutron.ovs_bridge)
self.assertNotIn('should_create_bridge', net) self.assertEqual(iid, 'port-id') def test_build_network_info_model(self): api = neutronapi.API() fake_inst = objects.Instance() fake_inst.project_id = 'fake' fake_inst.uuid = 'uuid' fake_inst.info_cache = objects.InstanceInfoCache() fake_inst.info_cache.network_info = model.NetworkInfo() fake_ports = [ # admin_state_up=True and status='ACTIVE' thus vif.active=True {'id': 'port1', 'network_id': 'net-id', 'admin_state_up': True, 'status': 'ACTIVE', 'fixed_ips': [{'ip_address': '1.1.1.1'}], 'mac_address': 'de:ad:be:ef:00:01', 'binding:vif_type': model.VIF_TYPE_BRIDGE, 'binding:vnic_type': model.VNIC_TYPE_NORMAL, 'binding:vif_details': {}, }, # admin_state_up=False and status='DOWN' thus vif.active=True {'id': 'port2', 'network_id': 'net-id', 'admin_state_up': False, 'status': 'DOWN', 'fixed_ips': [{'ip_address': '1.1.1.1'}], 'mac_address': 'de:ad:be:ef:00:02', 'binding:vif_type': model.VIF_TYPE_BRIDGE, 'binding:vnic_type': model.VNIC_TYPE_NORMAL, 'binding:vif_details': {}, }, # admin_state_up=True and status='DOWN' thus vif.active=False {'id': 'port0', 'network_id': 'net-id', 'admin_state_up': True, 'status': 'DOWN', 'fixed_ips': [{'ip_address': '1.1.1.1'}], 'mac_address': 'de:ad:be:ef:00:03', 'binding:vif_type': model.VIF_TYPE_BRIDGE, 'binding:vnic_type': model.VNIC_TYPE_NORMAL, 'binding:vif_details': {}, }, # admin_state_up=True and status='ACTIVE' thus vif.active=True {'id': 'port3', 'network_id': 'net-id', 'admin_state_up': True, 'status': 'ACTIVE', 'fixed_ips': [{'ip_address': '1.1.1.1'}], 'mac_address': 'de:ad:be:ef:00:04', 'binding:vif_type': model.VIF_TYPE_HW_VEB, 'binding:vnic_type': model.VNIC_TYPE_DIRECT, 'binding:profile': {'pci_vendor_info': '1137:0047', 'pci_slot': '0000:0a:00.1', 'physical_network': 'phynet1'}, 'binding:vif_details': {model.VIF_DETAILS_PROFILEID: 'pfid'}, }, # admin_state_up=True and status='ACTIVE' thus vif.active=True {'id': 'port4', 'network_id': 'net-id', 'admin_state_up': True, 'status': 'ACTIVE', 'fixed_ips': [{'ip_address': '1.1.1.1'}], 'mac_address': 'de:ad:be:ef:00:05', 'binding:vif_type': model.VIF_TYPE_802_QBH, 'binding:vnic_type': model.VNIC_TYPE_MACVTAP, 'binding:profile': {'pci_vendor_info': '1137:0047', 'pci_slot': '0000:0a:00.2', 'physical_network': 'phynet1'}, 'binding:vif_details': {model.VIF_DETAILS_PROFILEID: 'pfid'}, }, # admin_state_up=True and status='ACTIVE' thus vif.active=True # This port has no binding:vnic_type to verify default is assumed {'id': 'port5', 'network_id': 'net-id', 'admin_state_up': True, 'status': 'ACTIVE', 'fixed_ips': [{'ip_address': '1.1.1.1'}], 'mac_address': 'de:ad:be:ef:00:06', 'binding:vif_type': model.VIF_TYPE_BRIDGE, # No binding:vnic_type 'binding:vif_details': {}, }, # This does not match the networks we provide below, # so it should be ignored (and is here to verify that) {'id': 'port6', 'network_id': 'other-net-id', 'admin_state_up': True, 'status': 'DOWN', 'binding:vnic_type': model.VNIC_TYPE_NORMAL, }, ] fake_subnets = [model.Subnet(cidr='1.0.0.0/8')] fake_nets = [ {'id': 'net-id', 'name': 'foo', 'tenant_id': 'fake', } ] neutronapi.get_client(mox.IgnoreArg(), admin=True).MultipleTimes( ).AndReturn(self.moxed_client) self.moxed_client.list_ports( tenant_id='fake', device_id='uuid').AndReturn( {'ports': fake_ports}) self.mox.StubOutWithMock(api, '_get_floating_ips_by_fixed_and_port') self.mox.StubOutWithMock(api, '_get_subnets_from_port') requested_ports = [fake_ports[2], fake_ports[0], fake_ports[1], fake_ports[3], fake_ports[4], fake_ports[5]] for 
requested_port in requested_ports: api._get_floating_ips_by_fixed_and_port( self.moxed_client, '1.1.1.1', requested_port['id']).AndReturn( [{'floating_ip_address': '10.0.0.1'}]) for requested_port in requested_ports: api._get_subnets_from_port(self.context, requested_port ).AndReturn(fake_subnets) self.mox.StubOutWithMock(api, '_get_preexisting_port_ids') api._get_preexisting_port_ids(fake_inst).AndReturn(['port5']) self.mox.ReplayAll() neutronapi.get_client('fake') fake_inst.info_cache = objects.InstanceInfoCache.new( self.context, 'fake-uuid') fake_inst.info_cache.network_info = model.NetworkInfo.hydrate([]) nw_infos = api._build_network_info_model( self.context, fake_inst, fake_nets, [fake_ports[2]['id'], fake_ports[0]['id'], fake_ports[1]['id'], fake_ports[3]['id'], fake_ports[4]['id'], fake_ports[5]['id']], preexisting_port_ids=['port3']) self.assertEqual(len(nw_infos), 6) index = 0 for nw_info in nw_infos: self.assertEqual(nw_info['address'], requested_ports[index]['mac_address']) self.assertEqual(nw_info['devname'], 'tapport' + str(index)) self.assertIsNone(nw_info['ovs_interfaceid']) self.assertEqual(nw_info['type'], requested_ports[index]['binding:vif_type']) if nw_info['type'] == model.VIF_TYPE_BRIDGE: self.assertEqual(nw_info['network']['bridge'], 'brqnet-id') self.assertEqual(nw_info['vnic_type'], requested_ports[index].get('binding:vnic_type', model.VNIC_TYPE_NORMAL)) self.assertEqual(nw_info.get('details'), requested_ports[index].get('binding:vif_details')) self.assertEqual(nw_info.get('profile'), requested_ports[index].get('binding:profile')) index += 1 self.assertEqual(nw_infos[0]['active'], False) self.assertEqual(nw_infos[1]['active'], True) self.assertEqual(nw_infos[2]['active'], True) self.assertEqual(nw_infos[3]['active'], True) self.assertEqual(nw_infos[4]['active'], True) self.assertEqual(nw_infos[5]['active'], True) self.assertEqual(nw_infos[0]['id'], 'port0') self.assertEqual(nw_infos[1]['id'], 'port1') self.assertEqual(nw_infos[2]['id'], 'port2') self.assertEqual(nw_infos[3]['id'], 'port3') self.assertEqual(nw_infos[4]['id'], 'port4') self.assertEqual(nw_infos[5]['id'], 'port5') self.assertFalse(nw_infos[0]['preserve_on_delete']) self.assertFalse(nw_infos[1]['preserve_on_delete']) self.assertFalse(nw_infos[2]['preserve_on_delete']) self.assertTrue(nw_infos[3]['preserve_on_delete']) self.assertFalse(nw_infos[4]['preserve_on_delete']) self.assertTrue(nw_infos[5]['preserve_on_delete']) @mock.patch('nova.network.neutronv2.api.API._nw_info_get_subnets') @mock.patch('nova.network.neutronv2.api.API._nw_info_get_ips') @mock.patch('nova.network.neutronv2.api.API._nw_info_build_network') @mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids') @mock.patch('nova.network.neutronv2.api.API._gather_port_ids_and_networks') def test_build_network_info_model_empty( self, mock_gather_port_ids_and_networks, mock_get_preexisting_port_ids, mock_nw_info_build_network, mock_nw_info_get_ips, mock_nw_info_get_subnets): api = neutronapi.API() fake_inst = objects.Instance() fake_inst.project_id = 'fake' fake_inst.uuid = 'uuid' fake_inst.info_cache = objects.InstanceInfoCache() fake_inst.info_cache.network_info = model.NetworkInfo() fake_ports = [ # admin_state_up=True and status='ACTIVE' thus vif.active=True {'id': 'port1', 'network_id': 'net-id', 'admin_state_up': True, 'status': 'ACTIVE', 'fixed_ips': [{'ip_address': '1.1.1.1'}], 'mac_address': 'de:ad:be:ef:00:01', 'binding:vif_type': model.VIF_TYPE_BRIDGE, 'binding:vnic_type': model.VNIC_TYPE_NORMAL, 'binding:vif_details': 
{}, }, ] fake_subnets = [model.Subnet(cidr='1.0.0.0/8')] neutronapi.get_client(mox.IgnoreArg(), admin=True).MultipleTimes( ).AndReturn(self.moxed_client) self.moxed_client.list_ports( tenant_id='fake', device_id='uuid').AndReturn( {'ports': fake_ports}) mock_gather_port_ids_and_networks.return_value = (None, None) mock_get_preexisting_port_ids.return_value = [] mock_nw_info_build_network.return_value = (None, None) mock_nw_info_get_ips.return_value = [] mock_nw_info_get_subnets.return_value = fake_subnets self.mox.ReplayAll() neutronapi.get_client('fake') nw_infos = api._build_network_info_model( self.context, fake_inst) self.assertEqual(len(nw_infos), 1) def test_get_subnets_from_port(self): api = neutronapi.API() port_data = copy.copy(self.port_data1[0]) subnet_data1 = copy.copy(self.subnet_data1) subnet_data1[0]['host_routes'] = [ {'destination': '192.168.0.0/24', 'nexthop': '1.0.0.10'} ] self.moxed_client.list_subnets( id=[port_data['fixed_ips'][0]['subnet_id']] ).AndReturn({'subnets': subnet_data1}) self.moxed_client.list_ports( network_id=subnet_data1[0]['network_id'], device_owner='network:dhcp').AndReturn({'ports': []}) self.mox.ReplayAll() subnets = api._get_subnets_from_port(self.context, port_data) self.assertEqual(len(subnets), 1) self.assertEqual(len(subnets[0]['routes']), 1) self.assertEqual(subnets[0]['routes'][0]['cidr'], subnet_data1[0]['host_routes'][0]['destination']) self.assertEqual(subnets[0]['routes'][0]['gateway']['address'], subnet_data1[0]['host_routes'][0]['nexthop']) def test_get_all_empty_list_networks(self): api = neutronapi.API() self.moxed_client.list_networks().AndReturn({'networks': []}) self.mox.ReplayAll() networks = api.get_all(self.context) self.assertIsInstance(networks, objects.NetworkList) self.assertEqual(0, len(networks)) @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock()) def test_get_port_vnic_info_1(self, mock_get_client): api = neutronapi.API() self.mox.ResetAll() test_port = { 'port': {'id': 'my_port_id1', 'network_id': 'net-id', 'binding:vnic_type': model.VNIC_TYPE_DIRECT, }, } test_net = {'network': {'provider:physical_network': 'phynet1'}} mock_client = mock_get_client() mock_client.show_port.return_value = test_port mock_client.show_network.return_value = test_net vnic_type, phynet_name = api._get_port_vnic_info( self.context, mock_client, test_port['port']['id']) mock_client.show_port.assert_called_once_with(test_port['port']['id'], fields=['binding:vnic_type', 'network_id']) mock_client.show_network.assert_called_once_with( test_port['port']['network_id'], fields='provider:physical_network') self.assertEqual(model.VNIC_TYPE_DIRECT, vnic_type) self.assertEqual(phynet_name, 'phynet1') def _test_get_port_vnic_info(self, mock_get_client, binding_vnic_type=None): api = neutronapi.API() self.mox.ResetAll() test_port = { 'port': {'id': 'my_port_id2', 'network_id': 'net-id', }, } if binding_vnic_type: test_port['port']['binding:vnic_type'] = binding_vnic_type mock_get_client.reset_mock() mock_client = mock_get_client() mock_client.show_port.return_value = test_port vnic_type, phynet_name = api._get_port_vnic_info( self.context, mock_client, test_port['port']['id']) mock_client.show_port.assert_called_once_with(test_port['port']['id'], fields=['binding:vnic_type', 'network_id']) self.assertEqual(model.VNIC_TYPE_NORMAL, vnic_type) self.assertFalse(phynet_name) @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock()) def test_get_port_vnic_info_2(self, mock_get_client): 
self._test_get_port_vnic_info(mock_get_client, binding_vnic_type=model.VNIC_TYPE_NORMAL) @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock()) def test_get_port_vnic_info_3(self, mock_get_client): self._test_get_port_vnic_info(mock_get_client) @mock.patch.object(neutronapi.API, "_get_port_vnic_info") @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock()) def test_create_pci_requests_for_sriov_ports(self, mock_get_client, mock_get_port_vnic_info): api = neutronapi.API() self.mox.ResetAll() requested_networks = objects.NetworkRequestList( objects = [ objects.NetworkRequest(port_id='my_portid1'), objects.NetworkRequest(network_id='net1'), objects.NetworkRequest(port_id='my_portid2'), objects.NetworkRequest(port_id='my_portid3'), objects.NetworkRequest(port_id='my_portid4')]) pci_requests = objects.InstancePCIRequests(requests=[]) mock_get_port_vnic_info.side_effect = [ (model.VNIC_TYPE_DIRECT, 'phynet1'), (model.VNIC_TYPE_NORMAL, ''), (model.VNIC_TYPE_MACVTAP, 'phynet1'), (model.VNIC_TYPE_MACVTAP, 'phynet2') ] api.create_pci_requests_for_sriov_ports( None, pci_requests, requested_networks) self.assertEqual(3, len(pci_requests.requests)) has_pci_request_id = [net.pci_request_id is not None for net in requested_networks.objects] expected_results = [True, False, False, True, True] self.assertEqual(expected_results, has_pci_request_id) class TestNeutronv2WithMock(test.TestCase): """Used to test Neutron V2 API with mock.""" def setUp(self): super(TestNeutronv2WithMock, self).setUp() self.api = neutronapi.API() self.context = context.RequestContext( 'fake-user', 'fake-project', auth_token='bff4a5a6b9eb4ea2a6efec6eefb77936') @mock.patch('oslo_concurrency.lockutils.lock') def test_get_instance_nw_info_locks_per_instance(self, mock_lock): instance = objects.Instance(uuid=uuid.uuid4()) api = neutronapi.API() mock_lock.side_effect = test.TestingException self.assertRaises(test.TestingException, api.get_instance_nw_info, 'context', instance) mock_lock.assert_called_once_with('refresh_cache-%s' % instance.uuid) @mock.patch('oslo_concurrency.lockutils.lock') @mock.patch.object(neutronapi.API, '_get_instance_nw_info') @mock.patch('nova.network.base_api.update_instance_cache_with_nw_info') def test_get_instance_nw_info(self, mock_update, mock_get, mock_lock): fake_result = mock.sentinel.get_nw_info_result mock_get.return_value = fake_result instance = fake_instance.fake_instance_obj(self.context) result = self.api.get_instance_nw_info(self.context, instance) mock_get.assert_called_once_with(self.context, instance) mock_update.assert_called_once_with(self.api, self.context, instance, nw_info=fake_result, update_cells=False) self.assertEqual(fake_result, result) def _test_validate_networks_fixed_ip_no_dup(self, nets, requested_networks, ids, list_port_values): def _fake_list_ports(**search_opts): for args, return_value in list_port_values: if args == search_opts: return return_value self.fail('Unexpected call to list_ports %s' % search_opts) with contextlib.nested( mock.patch.object(client.Client, 'list_ports', side_effect=_fake_list_ports), mock.patch.object(client.Client, 'list_networks', return_value={'networks': nets}), mock.patch.object(client.Client, 'show_quota', return_value={'quota': {'port': 50}})) as ( list_ports_mock, list_networks_mock, show_quota_mock): self.api.validate_networks(self.context, requested_networks, 1) self.assertEqual(len(list_port_values), len(list_ports_mock.call_args_list)) list_networks_mock.assert_called_once_with(id=ids) 
show_quota_mock.assert_called_once_with(tenant_id='fake-project') def test_validate_networks_over_limit_quota(self): """Test validates that a relevant exception is being raised when there are more ports defined, than there is a quota for it. """ requested_networks = [('my_netid1', '10.0.1.2', None, None), ('my_netid2', '10.0.1.3', None, None)] list_port_values = [({'network_id': 'my_netid1', 'fixed_ips': 'ip_address=10.0.1.2', 'fields': 'device_id'}, {'ports': []}), ({'network_id': 'my_netid2', 'fixed_ips': 'ip_address=10.0.1.3', 'fields': 'device_id'}, {'ports': []}), ({'tenant_id': 'fake-project'}, {'ports': [1, 2, 3, 4, 5]})] nets = [{'subnets': '1'}, {'subnets': '2'}] def _fake_list_ports(**search_opts): for args, return_value in list_port_values: if args == search_opts: return return_value with contextlib.nested( mock.patch.object(self.api, '_get_available_networks', return_value=nets), mock.patch.object(client.Client, 'list_ports', side_effect=_fake_list_ports), mock.patch.object(client.Client, 'show_quota', return_value={'quota': {'port': 1}})): self.assertRaises(exception.PortLimitExceeded, self.api.validate_networks, self.context, requested_networks, 1) def test_validate_networks_fixed_ip_no_dup1(self): # Test validation for a request for a network with a # fixed ip that is not already in use because no fixed ips in use nets1 = [{'id': 'my_netid1', 'name': 'my_netname1', 'subnets': ['mysubnid1'], 'tenant_id': 'fake-project'}] requested_networks = [('my_netid1', '10.0.1.2', None, None)] ids = ['my_netid1'] list_port_values = [({'network_id': 'my_netid1', 'fixed_ips': 'ip_address=10.0.1.2', 'fields': 'device_id'}, {'ports': []}), ({'tenant_id': 'fake-project'}, {'ports': []})] self._test_validate_networks_fixed_ip_no_dup(nets1, requested_networks, ids, list_port_values) def test_validate_networks_fixed_ip_no_dup2(self): # Test validation for a request for a network with a # fixed ip that is not already in use because not used on this net id nets2 = [{'id': 'my_netid1', 'name': 'my_netname1', 'subnets': ['mysubnid1'], 'tenant_id': 'fake-project'}, {'id': 'my_netid2', 'name': 'my_netname2', 'subnets': ['mysubnid2'], 'tenant_id': 'fake-project'}] requested_networks = [('my_netid1', '10.0.1.2', None, None), ('my_netid2', '10.0.1.3', None, None)] ids = ['my_netid1', 'my_netid2'] list_port_values = [({'network_id': 'my_netid1', 'fixed_ips': 'ip_address=10.0.1.2', 'fields': 'device_id'}, {'ports': []}), ({'network_id': 'my_netid2', 'fixed_ips': 'ip_address=10.0.1.3', 'fields': 'device_id'}, {'ports': []}), ({'tenant_id': 'fake-project'}, {'ports': []})] self._test_validate_networks_fixed_ip_no_dup(nets2, requested_networks, ids, list_port_values) def test_validate_networks_fixed_ip_dup(self): # Test validation for a request for a network with a # fixed ip that is already in use requested_networks = [('my_netid1', '10.0.1.2', None, None)] list_port_mock_params = {'network_id': 'my_netid1', 'fixed_ips': 'ip_address=10.0.1.2', 'fields': 'device_id'} list_port_mock_return = {'ports': [({'device_id': 'my_deviceid'})]} with mock.patch.object(client.Client, 'list_ports', return_value=list_port_mock_return) as ( list_ports_mock): self.assertRaises(exception.FixedIpAlreadyInUse, self.api.validate_networks, self.context, requested_networks, 1) list_ports_mock.assert_called_once_with(**list_port_mock_params) def test_allocate_floating_ip_exceed_limit(self): # Verify that the correct exception is thrown when quota exceed pool_name = 'dummy' api = neutronapi.API() with contextlib.nested( 
mock.patch.object(client.Client, 'create_floatingip'), mock.patch.object(api, '_get_floating_ip_pool_id_by_name_or_id')) as ( create_mock, get_mock): create_mock.side_effect = exceptions.OverQuotaClient() self.assertRaises(exception.FloatingIpLimitExceeded, api.allocate_floating_ip, self.context, pool_name) def test_create_port_for_instance_no_more_ip(self): instance = fake_instance.fake_instance_obj(self.context) net = {'id': 'my_netid1', 'name': 'my_netname1', 'subnets': ['mysubnid1'], 'tenant_id': instance['project_id']} with mock.patch.object(client.Client, 'create_port', side_effect=exceptions.IpAddressGenerationFailureClient()) as ( create_port_mock): zone = 'compute:%s' % instance['availability_zone'] port_req_body = {'port': {'device_id': instance['uuid'], 'device_owner': zone}} self.assertRaises(exception.NoMoreFixedIps, self.api._create_port, neutronapi.get_client(self.context), instance, net['id'], port_req_body) create_port_mock.assert_called_once_with(port_req_body) @mock.patch.object(client.Client, 'create_port', side_effect=exceptions.MacAddressInUseClient()) def test_create_port_for_instance_mac_address_in_use(self, create_port_mock): # Create fake data. instance = fake_instance.fake_instance_obj(self.context) net = {'id': 'my_netid1', 'name': 'my_netname1', 'subnets': ['mysubnid1'], 'tenant_id': instance['project_id']} zone = 'compute:%s' % instance['availability_zone'] port_req_body = {'port': {'device_id': instance['uuid'], 'device_owner': zone, 'mac_address': 'XX:XX:XX:XX:XX:XX'}} available_macs = set(['XX:XX:XX:XX:XX:XX']) # Run the code. self.assertRaises(exception.PortInUse, self.api._create_port, neutronapi.get_client(self.context), instance, net['id'], port_req_body, available_macs=available_macs) # Assert the calls. create_port_mock.assert_called_once_with(port_req_body) @mock.patch.object(client.Client, 'create_port', side_effect=exceptions.IpAddressInUseClient()) def test_create_port_for_fixed_ip_in_use(self, create_port_mock): # Create fake data. instance = fake_instance.fake_instance_obj(self.context) net = {'id': 'my_netid1', 'name': 'my_netname1', 'subnets': ['mysubnid1'], 'tenant_id': instance['project_id']} zone = 'compute:%s' % instance['availability_zone'] port_req_body = {'port': {'device_id': instance['uuid'], 'device_owner': zone, 'mac_address': 'XX:XX:XX:XX:XX:XX'}} fake_ip = '1.1.1.1' # Run the code. self.assertRaises(exception.FixedIpAlreadyInUse, self.api._create_port, neutronapi.get_client(self.context), instance, net['id'], port_req_body, fixed_ip=fake_ip) # Assert the calls. create_port_mock.assert_called_once_with(port_req_body) @mock.patch.object(client.Client, 'create_port', side_effect=exceptions.InvalidIpForNetworkClient()) def test_create_port_with_invalid_ip_for_network(self, create_port_mock): # Create fake data. instance = fake_instance.fake_instance_obj(self.context) net = {'id': 'my_netid1', 'name': 'my_netname1', 'subnets': ['mysubnid1'], 'tenant_id': instance['project_id']} zone = 'compute:%s' % instance['availability_zone'] port_req_body = {'port': {'device_id': instance['uuid'], 'device_owner': zone, 'mac_address': 'XX:XX:XX:XX:XX:XX'}} fake_ip = '1.1.1.1' # Run the code. self.assertRaises(exception.InvalidInput, self.api._create_port, neutronapi.get_client(self.context), instance, net['id'], port_req_body, fixed_ip=fake_ip) # Assert the calls. 
create_port_mock.assert_called_once_with(port_req_body) def test_get_network_detail_not_found(self): api = neutronapi.API() expected_exc = exceptions.NetworkNotFoundClient() network_uuid = '02cacbca-7d48-4a2c-8011-43eecf8a9786' with mock.patch.object(client.Client, 'show_network', side_effect=expected_exc) as ( fake_show_network): self.assertRaises(exception.NetworkNotFound, api.get, self.context, network_uuid) fake_show_network.assert_called_once_with(network_uuid) @mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids') @mock.patch('nova.network.neutronv2.api.API.' '_refresh_neutron_extensions_cache') def test_deallocate_for_instance_uses_delete_helper(self, mock_refresh, mock_preexisting): # setup fake data instance = fake_instance.fake_instance_obj(self.context) mock_preexisting.return_value = [] port_data = {'ports': [{'id': str(uuid.uuid4())}]} ports = set([port['id'] for port in port_data.get('ports')]) api = neutronapi.API() # setup mocks mock_client = mock.Mock() mock_client.list_ports.return_value = port_data with contextlib.nested( mock.patch.object(neutronapi, 'get_client', return_value=mock_client), mock.patch.object(api, '_delete_ports') ) as ( mock_get_client, mock_delete ): # run the code api.deallocate_for_instance(self.context, instance) # assert the calls mock_client.list_ports.assert_called_once_with( device_id=instance.uuid) mock_delete.assert_called_once_with( mock_client, instance, ports, raise_if_fail=True) def _test_delete_ports(self, expect_raise): results = [exceptions.NeutronClientException, None] mock_client = mock.Mock() with mock.patch.object(mock_client, 'delete_port', side_effect=results): api = neutronapi.API() api._delete_ports(mock_client, {'uuid': 'foo'}, ['port1', 'port2'], raise_if_fail=expect_raise) def test_delete_ports_raise(self): self.assertRaises(exceptions.NeutronClientException, self._test_delete_ports, True) def test_delete_ports_no_raise(self): self._test_delete_ports(False) def test_delete_ports_never_raise_404(self): mock_client = mock.Mock() mock_client.delete_port.side_effect = exceptions.PortNotFoundClient api = neutronapi.API() api._delete_ports(mock_client, {'uuid': 'foo'}, ['port1'], raise_if_fail=True) mock_client.delete_port.assert_called_once_with('port1') @mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids') def test_deallocate_port_for_instance_fails(self, mock_preexisting): mock_preexisting.return_value = [] mock_client = mock.Mock() api = neutronapi.API() with contextlib.nested( mock.patch.object(neutronapi, 'get_client', return_value=mock_client), mock.patch.object(api, '_delete_ports', side_effect=exceptions.Unauthorized), mock.patch.object(api, 'get_instance_nw_info') ) as ( get_client, delete_ports, get_nw_info ): self.assertRaises(exceptions.Unauthorized, api.deallocate_port_for_instance, self.context, instance={'uuid': 'fake'}, port_id='fake') # make sure that we didn't try to reload nw info self.assertFalse(get_nw_info.called) @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock()) def _test_show_port_exceptions(self, client_exc, expected_nova_exc, get_client_mock): show_port_mock = mock.Mock(side_effect=client_exc) get_client_mock.return_value.show_port = show_port_mock self.assertRaises(expected_nova_exc, self.api.show_port, self.context, 'fake_port_id') def test_show_port_not_found(self): self._test_show_port_exceptions(exceptions.PortNotFoundClient, exception.PortNotFound) def test_show_port_forbidden(self): self._test_show_port_exceptions(exceptions.Unauthorized, 
exception.Forbidden) def test_show_port_unknown_exception(self): self._test_show_port_exceptions(exceptions.NeutronClientException, exception.NovaException) def test_get_network(self): api = neutronapi.API() with mock.patch.object(client.Client, 'show_network') as mock_show: mock_show.return_value = { 'network': {'id': 'fake-uuid', 'name': 'fake-network'} } net_obj = api.get(self.context, 'fake-uuid') self.assertEqual('fake-network', net_obj.label) self.assertEqual('fake-network', net_obj.name) self.assertEqual('fake-uuid', net_obj.uuid) def test_get_all_networks(self): api = neutronapi.API() with mock.patch.object(client.Client, 'list_networks') as mock_list: mock_list.return_value = { 'networks': [ {'id': 'fake-uuid1', 'name': 'fake-network1'}, {'id': 'fake-uuid2', 'name': 'fake-network2'}, ]} net_objs = api.get_all(self.context) self.assertIsInstance(net_objs, objects.NetworkList) self.assertEqual(2, len(net_objs)) self.assertEqual(('fake-uuid1', 'fake-network1'), (net_objs[0].uuid, net_objs[0].name)) self.assertEqual(('fake-uuid2', 'fake-network2'), (net_objs[1].uuid, net_objs[1].name)) @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock()) def test_update_port_bindings_for_instance_same_host(self, get_client_mock): instance = fake_instance.fake_instance_obj(self.context) self.api._has_port_binding_extension = mock.Mock(return_value=True) # We test two ports, one with the same host as the host passed in and # one where binding:host_id isn't set, so we update that port. fake_ports = {'ports': [ {'id': 'fake-port-1', 'binding:host_id': instance.host}, {'id': 'fake-port-2'}]} list_ports_mock = mock.Mock(return_value=fake_ports) get_client_mock.return_value.list_ports = list_ports_mock update_port_mock = mock.Mock() get_client_mock.return_value.update_port = update_port_mock self.api._update_port_binding_for_instance(self.context, instance, instance.host) # Assert that update_port was only called on the port without a host. 
update_port_mock.assert_called_once_with( 'fake-port-2', {'port': {'binding:host_id': instance.host}}) @mock.patch('nova.network.neutronv2.api.compute_utils') def test_get_preexisting_port_ids(self, mocked_comp_utils): mocked_comp_utils.get_nw_info_for_instance.return_value = [model.VIF( id='1', preserve_on_delete=False), model.VIF( id='2', preserve_on_delete=True), model.VIF( id='3', preserve_on_delete=True)] result = self.api._get_preexisting_port_ids(None) self.assertEqual(['2', '3'], result, "Invalid preexisting ports") def _test_unbind_ports_get_client(self, mock_neutron, mock_has_ext, has_ext=False): mock_ctx = mock.Mock(is_admin=False) mock_has_ext.return_value = has_ext ports = ["1", "2", "3"] self.api._unbind_ports(mock_ctx, ports, mock_neutron) get_client_calls = [] get_client_calls.append(mock.call(mock_ctx) if not has_ext else mock.call(mock_ctx, admin=True)) if has_ext: self.assertEqual(1, mock_neutron.call_count) mock_neutron.assert_has_calls(get_client_calls, True) else: self.assertEqual(0, mock_neutron.call_count) @mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension') @mock.patch('nova.network.neutronv2.api.get_client') def test_unbind_ports_get_client_binding_extension(self, mock_neutron, mock_has_ext): self._test_unbind_ports_get_client(mock_neutron, mock_has_ext, True) @mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension') @mock.patch('nova.network.neutronv2.api.get_client') def test_unbind_ports_get_client(self, mock_neutron, mock_has_ext): self._test_unbind_ports_get_client(mock_neutron, mock_has_ext) def _test_unbind_ports(self, mock_neutron, mock_has_ext, has_ext=False): mock_client = mock.Mock() mock_update_port = mock.Mock() mock_client.update_port = mock_update_port mock_ctx = mock.Mock(is_admin=False) mock_has_ext.return_value = has_ext mock_neutron.return_value = mock_client ports = ["1", "2", "3"] api = neutronapi.API() api._unbind_ports(mock_ctx, ports, mock_client) body = {'port': {'device_id': '', 'device_owner': ''}} if has_ext: body['port']['binding:host_id'] = None update_port_calls = [] for p in ports: update_port_calls.append(mock.call(p, body)) self.assertEqual(3, mock_update_port.call_count) mock_update_port.assert_has_calls(update_port_calls) @mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension') @mock.patch('nova.network.neutronv2.api.get_client') def test_unbind_ports_binding_ext(self, mock_neutron, mock_has_ext): self._test_unbind_ports(mock_neutron, mock_has_ext, True) @mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension') @mock.patch('nova.network.neutronv2.api.get_client') def test_unbind_ports(self, mock_neutron, mock_has_ext): self._test_unbind_ports(mock_neutron, mock_has_ext, False) @mock.patch('nova.network.neutronv2.api.API.get_instance_nw_info') @mock.patch('nova.network.neutronv2.api.excutils') @mock.patch('nova.network.neutronv2.api.API._delete_ports') @mock.patch('nova.network.neutronv2.api.API.' '_check_external_network_attach') @mock.patch('nova.network.neutronv2.api.LOG') @mock.patch('nova.network.neutronv2.api.API._unbind_ports') @mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension') @mock.patch('nova.network.neutronv2.api.API.' 
'_populate_neutron_extension_values') @mock.patch('nova.network.neutronv2.api.API._get_available_networks') @mock.patch('nova.network.neutronv2.api.get_client') def test_allocate_for_instance_unbind(self, mock_ntrn, mock_avail_nets, mock_ext_vals, mock_has_pbe, mock_unbind, mock_log, mock_cena, mock_del_ports, mock_exeu, mock_giwn): mock_nc = mock.Mock() def show_port(port_id): return {'port': {'network_id': 'net-1', 'id': port_id, 'tenant_id': 'proj-1'}} mock_nc.show_port = show_port mock_ntrn.return_value = mock_nc mock_nc.update_port.side_effect = [True, True, Exception] mock_inst = mock.Mock(project_id="proj-1", availability_zone='zone-1', uuid='inst-1') mock_has_pbe.return_value = False nw_req = objects.NetworkRequestList( objects = [objects.NetworkRequest(port_id='fake-port1'), objects.NetworkRequest(port_id='fake-port2'), objects.NetworkRequest(port_id='fail-port')]) mock_avail_nets.return_value = [{'id': 'net-1'}] self.api.allocate_for_instance(mock.sentinel.ctx, mock_inst, requested_networks=nw_req) mock_unbind.assert_called_once_with(mock.sentinel.ctx, ['fake-port1', 'fake-port2'], mock.ANY, mock.ANY) @mock.patch('nova.objects.network_request.utils') @mock.patch('nova.network.neutronv2.api.LOG') @mock.patch('nova.network.neutronv2.api.base_api') @mock.patch('nova.network.neutronv2.api.API._delete_ports') @mock.patch('nova.network.neutronv2.api.API._unbind_ports') @mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids') @mock.patch('nova.network.neutronv2.api.get_client') def test_preexisting_deallocate_for_instance(self, mock_ntrn, mock_gppids, mock_unbind, mock_deletep, mock_baseapi, mock_log, req_utils): req_utils.is_neutron.return_value = True mock_inst = mock.Mock(project_id="proj-1", availability_zone='zone-1', uuid='inst-1') mock_nc = mock.Mock() mock_ntrn.return_value = mock_nc mock_nc.list_ports.return_value = {'ports': [ {'id': 'port-1'}, {'id': 'port-2'}, {'id': 'port-3'} ]} nw_req = objects.NetworkRequestList( objects = [objects.NetworkRequest(network_id='net-1', address='192.168.0.3', port_id='port-1', pci_request_id='pci-1')]) mock_gppids.return_value = ['port-3'] self.api.deallocate_for_instance(mock.sentinel.ctx, mock_inst, requested_networks=nw_req) mock_unbind.assert_called_once_with(mock.sentinel.ctx, set(['port-1', 'port-3']), mock.ANY) mock_deletep.assert_called_once_with(mock_nc, mock_inst, set(['port-2']), raise_if_fail=True) @mock.patch('nova.network.neutronv2.api.API.get_instance_nw_info') @mock.patch('nova.network.neutronv2.api.API._unbind_ports') @mock.patch('nova.network.neutronv2.api.compute_utils') @mock.patch('nova.network.neutronv2.api.get_client') def test_preexisting_deallocate_port_for_instance(self, mock_ntrn, mock_comp_utils, mock_unbind, mock_netinfo): mock_comp_utils.get_nw_info_for_instance.return_value = [model.VIF( id='1', preserve_on_delete=False), model.VIF( id='2', preserve_on_delete=True), model.VIF( id='3', preserve_on_delete=True)] mock_inst = mock.Mock(project_id="proj-1", availability_zone='zone-1', uuid='inst-1') mock_client = mock.Mock() mock_ntrn.return_value = mock_client self.api.deallocate_port_for_instance(mock.sentinel.ctx, mock_inst, '2') mock_unbind.assert_called_once_with(mock.sentinel.ctx, ['2'], mock_client) class TestNeutronv2ModuleMethods(test.NoDBTestCase): def test_gather_port_ids_and_networks_wrong_params(self): api = neutronapi.API() # Test with networks not None and port_ids is None self.assertRaises(exception.NovaException, api._gather_port_ids_and_networks, 'fake_context', 'fake_instance', 
[{'network': {'name': 'foo'}}], None) # Test with networks is None and port_ids not None self.assertRaises(exception.NovaException, api._gather_port_ids_and_networks, 'fake_context', 'fake_instance', None, ['list', 'of', 'port_ids']) def test_ensure_requested_network_ordering_no_preference_ids(self): l = [1, 2, 3] neutronapi._ensure_requested_network_ordering( lambda x: x, l, None) def test_ensure_requested_network_ordering_no_preference_hashes(self): l = [{'id': 3}, {'id': 1}, {'id': 2}] neutronapi._ensure_requested_network_ordering( lambda x: x['id'], l, None) self.assertEqual(l, [{'id': 3}, {'id': 1}, {'id': 2}]) def test_ensure_requested_network_ordering_with_preference(self): l = [{'id': 3}, {'id': 1}, {'id': 2}] neutronapi._ensure_requested_network_ordering( lambda x: x['id'], l, [1, 2, 3]) self.assertEqual(l, [{'id': 1}, {'id': 2}, {'id': 3}]) class TestNeutronv2Portbinding(TestNeutronv2Base): def test_allocate_for_instance_portbinding(self): self._allocate_for_instance(1, portbinding=True) def test_populate_neutron_extension_values_binding(self): api = neutronapi.API() neutronapi.get_client(mox.IgnoreArg()).AndReturn( self.moxed_client) self.moxed_client.list_extensions().AndReturn( {'extensions': [{'name': constants.PORTBINDING_EXT}]}) self.mox.ReplayAll() host_id = 'my_host_id' instance = {'host': host_id} port_req_body = {'port': {}} api._populate_neutron_extension_values(self.context, instance, None, port_req_body) self.assertEqual(port_req_body['port']['binding:host_id'], host_id) self.assertFalse(port_req_body['port'].get('binding:profile')) @mock.patch.object(pci_whitelist, 'get_pci_device_devspec') @mock.patch.object(pci_manager, 'get_instance_pci_devs') def test_populate_neutron_extension_values_binding_sriov(self, mock_get_instance_pci_devs, mock_get_pci_device_devspec): api = neutronapi.API() host_id = 'my_host_id' instance = {'host': host_id} port_req_body = {'port': {}} pci_req_id = 'my_req_id' pci_dev = {'vendor_id': '1377', 'product_id': '0047', 'address': '0000:0a:00.1', } PciDevice = collections.namedtuple('PciDevice', ['vendor_id', 'product_id', 'address']) mydev = PciDevice(**pci_dev) profile = {'pci_vendor_info': '1377:0047', 'pci_slot': '0000:0a:00.1', 'physical_network': 'phynet1', } mock_get_instance_pci_devs.return_value = [mydev] devspec = mock.Mock() devspec.get_tags.return_value = {'physical_network': 'phynet1'} mock_get_pci_device_devspec.return_value = devspec api._populate_neutron_binding_profile(instance, pci_req_id, port_req_body) self.assertEqual(port_req_body['port']['binding:profile'], profile) def _test_update_port_binding_false(self, func_name, *args): api = neutronapi.API() func = getattr(api, func_name) self.mox.StubOutWithMock(api, '_has_port_binding_extension') api._has_port_binding_extension(mox.IgnoreArg(), refresh_cache=True).AndReturn(False) self.mox.ReplayAll() func(*args) def _test_update_port_binding_true(self, expected_bind_host, func_name, *args): api = neutronapi.API() func = getattr(api, func_name) self.mox.StubOutWithMock(api, '_has_port_binding_extension') api._has_port_binding_extension(mox.IgnoreArg(), refresh_cache=True).AndReturn(True) neutronapi.get_client(mox.IgnoreArg(), admin=True).AndReturn( self.moxed_client) search_opts = {'device_id': self.instance['uuid'], 'tenant_id': self.instance['project_id']} ports = {'ports': [{'id': 'test1'}]} self.moxed_client.list_ports(**search_opts).AndReturn(ports) port_req_body = {'port': {'binding:host_id': expected_bind_host}} self.moxed_client.update_port('test1', 
port_req_body).AndReturn(None) self.mox.ReplayAll() func(*args) def _test_update_port_true_exception(self, expected_bind_host, func_name, *args): api = neutronapi.API() func = getattr(api, func_name) self.mox.StubOutWithMock(api, '_has_port_binding_extension') api._has_port_binding_extension(mox.IgnoreArg(), refresh_cache=True).AndReturn(True) neutronapi.get_client(mox.IgnoreArg(), admin=True).AndReturn( self.moxed_client) search_opts = {'device_id': self.instance['uuid'], 'tenant_id': self.instance['project_id']} ports = {'ports': [{'id': 'test1'}]} self.moxed_client.list_ports(**search_opts).AndReturn(ports) port_req_body = {'port': {'binding:host_id': expected_bind_host}} self.moxed_client.update_port('test1', port_req_body).AndRaise( Exception("fail to update port")) self.mox.ReplayAll() self.assertRaises(NEUTRON_CLIENT_EXCEPTION, func, *args) def test_migrate_instance_finish_binding_false(self): self._test_update_port_binding_false('migrate_instance_finish', self.context, None, {'dest_compute': 'fake'}) def test_migrate_instance_finish_binding_true(self): migration = {'source_compute': self.instance.get('host'), 'dest_compute': 'dest_host'} instance = self._fake_instance_object(self.instance) self._test_update_port_binding_true('dest_host', 'migrate_instance_finish', self.context, instance, migration) def test_migrate_instance_finish_binding_true_exception(self): migration = {'source_compute': self.instance.get('host'), 'dest_compute': 'dest_host'} instance = self._fake_instance_object(self.instance) self._test_update_port_true_exception('dest_host', 'migrate_instance_finish', self.context, instance, migration) def test_setup_instance_network_on_host_false(self): self._test_update_port_binding_false( 'setup_instance_network_on_host', self.context, None, 'fake_host') def test_setup_instance_network_on_host_true(self): instance = self._fake_instance_object(self.instance) self._test_update_port_binding_true('fake_host', 'setup_instance_network_on_host', self.context, instance, 'fake_host') def test_setup_instance_network_on_host_exception(self): instance = self._fake_instance_object(self.instance) self._test_update_port_true_exception( 'fake_host', 'setup_instance_network_on_host', self.context, instance, 'fake_host') def test_associate_not_implemented(self): api = neutronapi.API() self.assertRaises(NotImplementedError, api.associate, self.context, 'id') class TestNeutronv2ExtraDhcpOpts(TestNeutronv2Base): def setUp(self): super(TestNeutronv2ExtraDhcpOpts, self).setUp() neutronapi.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn( self.moxed_client) def test_allocate_for_instance_1_with_extra_dhcp_opts_turned_off(self): self._allocate_for_instance(1, extra_dhcp_opts=False) def test_allocate_for_instance_extradhcpopts(self): dhcp_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123'}, {'opt_name': 'server-ip-address', 'opt_value': '123.123.123.456'}] self._allocate_for_instance(1, dhcp_options=dhcp_opts) class TestNeutronClientForAdminScenarios(test.NoDBTestCase): @mock.patch('keystoneclient.auth.identity.v2.Password.get_token') def _test_get_client_for_admin(self, auth_mock, use_id=False, admin_context=False): token_value = uuid.uuid4().hex auth_mock.return_value = token_value self.flags(auth_strategy=None, group='neutron') self.flags(url='http://anyhost/', group='neutron') self.flags(timeout=30, group='neutron') if use_id: self.flags(admin_tenant_id='admin_tenant_id', group='neutron') 
self.flags(admin_user_id='admin_user_id', group='neutron') if admin_context: my_context = context.get_admin_context() else: my_context = context.RequestContext('userid', 'my_tenantid', auth_token='token') # clean global neutronapi.reset_state() if admin_context: # Note that the context does not contain a token but is # an admin context which will force an elevation to admin # credentials. context_client = neutronapi.get_client(my_context) else: # Note that the context is not elevated, but the True is passed in # which will force an elevation to admin credentials even though # the context has an auth_token. context_client = neutronapi.get_client(my_context, True) admin_auth = neutronapi._ADMIN_AUTH self.assertEqual(CONF.neutron.admin_auth_url, admin_auth.auth_url) self.assertEqual(CONF.neutron.admin_password, admin_auth.password) if use_id: self.assertEqual(CONF.neutron.admin_tenant_id, admin_auth.tenant_id) self.assertEqual(CONF.neutron.admin_user_id, admin_auth.user_id) self.assertIsNone(admin_auth.tenant_name) self.assertIsNone(admin_auth.username) else: self.assertEqual(CONF.neutron.admin_tenant_name, admin_auth.tenant_name) self.assertEqual(CONF.neutron.admin_username, admin_auth.username) self.assertIsNone(admin_auth.tenant_id) self.assertIsNone(admin_auth.user_id) self.assertEqual(CONF.neutron.timeout, neutronapi._SESSION.timeout) self.assertEqual(token_value, context_client.httpclient.auth.token) self.assertEqual(CONF.neutron.url, context_client.httpclient.auth.endpoint) def test_get_client_for_admin(self): self._test_get_client_for_admin() def test_get_client_for_admin_with_id(self): self._test_get_client_for_admin(use_id=True) def test_get_client_for_admin_context(self): self._test_get_client_for_admin(admin_context=True) def test_get_client_for_admin_context_with_id(self): self._test_get_client_for_admin(use_id=True, admin_context=True)
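# A minimal standalone sketch, not part of the nova source above. The port
# fixtures in test_build_network_info_model document a truth table for
# vif.active: (admin_state_up=True, status='ACTIVE') -> True,
# (admin_state_up=False, status='DOWN') -> True, and
# (admin_state_up=True, status='DOWN') -> False. The helper below is a
# hypothetical restatement of that rule for readers; the real derivation
# lives in nova.network.neutronv2.api, not in this test module.

def _vif_active_sketch(port):
    # A port counts as "active" when Neutron reports it ACTIVE, or when it
    # is administratively down (it will never transition, so there is
    # nothing left to wait on).
    return port['status'] == 'ACTIVE' or port['admin_state_up'] is False

assert _vif_active_sketch({'admin_state_up': True, 'status': 'ACTIVE'})
assert _vif_active_sketch({'admin_state_up': False, 'status': 'DOWN'})
assert not _vif_active_sketch({'admin_state_up': True, 'status': 'DOWN'})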
apache-2.0
YannThorimbert/PyWorld2D
rendering/tilers/beachtiler.py
1
4188
from thorpy._utils.images import load_image
from thorpy._utils.interpolation import get_y
from pygame import surfarray
import math, pygame

from PyWorld2D.rendering.tilers.roundtiler import RoundTiler


class BeachTiler(RoundTiler):

    def get_round(self, radius, background):
        w, h = self.c.get_size()
        surface = self.s.copy().convert_alpha()
        newsurf = pygame.Surface((w, h), pygame.SRCALPHA,
                                 depth=self.c.get_bitsize()).convert_alpha()
        newsurf.fill((0, 0, 0, 0))
        inner = self.c.get_rect().inflate((-4*radius, -4*radius))
        n_a = surfarray.pixels_alpha(newsurf)
        n_rgb = surfarray.pixels3d(newsurf)
        b_b_c_rgb = surfarray.pixels3d(self.c)
        rngs = [(inner.x, inner.y, 0, inner.x, 0, inner.y),
                (inner.right, inner.y, inner.right, w, 0, inner.y),
                (inner.x, inner.bottom, 0, inner.x, inner.bottom, h),
                (inner.right, inner.bottom, inner.right, w, inner.bottom, h)]
        for i, ranges in enumerate(rngs):
            x0, y0, xi, xf, yi, yf = ranges
            for x in range(xi, xf):
                for y in range(yi, yf):
                    d = math.hypot(x-x0, y-y0)
                    n_a[x][y] = 255 - get_y(d, 0, 2*radius)
                    n_rgb[x][y] = b_b_c_rgb[x][y]
        del n_a
        del n_rgb
        surface.unlock()
        newsurf.unlock()
        surface.blit(newsurf, (0, 0))
        return surface

    def get_antiround(self, radius, background):
        w, h = self.c.get_size()
        surface = self.s.copy().convert_alpha()
        newsurf = pygame.Surface((w, h), pygame.SRCALPHA,
                                 depth=self.c.get_bitsize()).convert_alpha()
        newsurf.fill((0, 0, 0, 0))
        outer = self.c.get_rect().inflate((-2*radius, -2*radius))
        n_a = surfarray.pixels_alpha(newsurf)
        n_rgb = surfarray.pixels3d(newsurf)
        b_b_c_rgb = surfarray.pixels3d(self.c)
        rngs = [(0, 0, 0, 2*radius, 0, 2*radius),
                (w, 0, w-2*radius, w, 0, 2*radius),
                (0, h, 0, 2*radius, h-2*radius, h),
                (w, h, w-2*radius, w, h-2*radius, h)]
        for i, ranges in enumerate(rngs):
            x0, y0, xi, xf, yi, yf = ranges
            for x in range(xi, xf):
                for y in range(yi, yf):
                    d = math.hypot(x-x0, y-y0)
                    n_a[x][y] = get_y(d, 0, 2*radius)
                    n_rgb[x][y] = b_b_c_rgb[x][y]  # grass
        del n_a
        del n_rgb
        surface.unlock()
        newsurf.unlock()
        surface.blit(newsurf, (0, 0))
        return surface

    def cut_side(self, side, radius, background):
        w, h = self.c.get_size()
        b_c_surf = self.c.copy().convert_alpha()  # beach
        newsurf = pygame.Surface((w, h), pygame.SRCALPHA,
                                 depth=self.c.get_bitsize()).convert_alpha()
        newsurf.fill((0, 0, 0, 0))
        n_a = surfarray.pixels_alpha(newsurf)
        n_rgb = surfarray.pixels3d(newsurf)
        s_rgb = surfarray.pixels3d(self.s)
        b_c_rgb = surfarray.pixels3d(b_c_surf)
        if "top" in side:
            for x in range(w):
                for y in range(0, 2*radius):
                    n_a[x][y] = 255 - get_y(y, 0, 2*radius)
                    n_rgb[x][y] = s_rgb[x][y]
        if "bottom" in side:
            for x in range(w):
                for y in range(h-2*radius, h):
                    n_a[x][y] = get_y(y, h-2*radius, h)
                    n_rgb[x][y] = s_rgb[x][y]
        if "left" in side:
            for y in range(h):
                for x in range(0, 2*radius):
                    n_a[x][y] = 255 - get_y(x, 0, 2*radius)
                    n_rgb[x][y] = s_rgb[x][y]
        if "right" in side:
            for y in range(h):
                for x in range(w-2*radius, w):
                    n_a[x][y] = get_y(x, w-2*radius, w)
                    n_rgb[x][y] = s_rgb[x][y]
        del n_a
        del n_rgb
        del b_c_rgb
        b_c_surf.unlock()
        newsurf.unlock()
        b_c_surf.blit(newsurf, (0, 0))
        return b_c_surf

    def _debug(self):
        for img in self.imgs.values():
            pygame.draw.rect(img, (0, 0, 0), img.get_rect(), 1)
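# A minimal standalone sketch of the alpha-falloff idea used by get_round /
# get_antiround / cut_side above: each border pixel's alpha is driven by its
# distance to an anchor, interpolated over a band of width 2*radius, so the
# beach texture fades into the neighbouring tile. thorpy's get_y is assumed
# here to map a distance in [0, 2*radius] onto [0, 255]; linear_falloff is a
# hypothetical linear stand-in, not thorpy's actual curve.
import math

def linear_falloff(d, start, end):
    # Clamp d into [start, end], then scale it linearly onto 0..255.
    if d <= start:
        return 0
    if d >= end:
        return 255
    return int(255 * (d - start) / float(end - start))

# E.g. a pixel 3 units from the corner anchor, with radius=4, keeps most of
# the beach layer's opacity (as in get_round's 255 - get_y(d, 0, 2*radius)):
alpha = 255 - linear_falloff(math.hypot(3.0, 0.0), 0, 2 * 4)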
mit
hiikezoe/android_kernel_asus_tf300t
Documentation/networking/cxacru-cf.py
14668
1626
#!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.

import sys
import struct

i = 0
while True:
    buf = sys.stdin.read(4)

    if len(buf) == 0:
        break
    elif len(buf) != 4:
        sys.stdout.write("\n")
        sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
        sys.exit(1)

    if i > 0:
        sys.stdout.write(" ")
    sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
    i += 1

sys.stdout.write("\n")
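# A small companion sketch, the inverse of the parser above: given the stated
# on-disk format (a packed list of le32 values), a cxacru-cf.bin can be
# produced from a list of values with the same struct format string. The
# sample values are hypothetical, and sys.stdout.buffer is a Python 3
# assumption (the original script is written against Python 2's byte stdout).
import struct
import sys

values = [0x1, 0x0, 0xffff]  # hypothetical config values for index 0, 1, 2
for v in values:
    sys.stdout.buffer.write(struct.pack("<I", v))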
gpl-2.0
agjohnson/readthedocs.org
readthedocs/projects/models.py
2
33611
import fnmatch import logging import os from urlparse import urlparse from distlib.version import UnsupportedVersionError from django.conf import settings from django.contrib.auth.models import User from django.core.urlresolvers import reverse from django.db import models from django.template.defaultfilters import slugify from django.utils.translation import ugettext_lazy as _ from guardian.shortcuts import assign from betterversion.better import version_windows, VersionIdentifier from builds.constants import LATEST from builds.constants import LATEST_VERBOSE_NAME from oauth import utils as oauth_utils from privacy.loader import RelatedProjectManager, ProjectManager from projects import constants from projects.exceptions import ProjectImportError from projects.templatetags.projects_tags import sort_version_aware from projects.utils import (highest_version as _highest, make_api_version, symlink, update_static_metadata) from taggit.managers import TaggableManager from tastyapi.slum import api from vcs_support.base import VCSProject from vcs_support.backends import backend_cls from vcs_support.utils import Lock, NonBlockingLock log = logging.getLogger(__name__) class ProjectRelationship(models.Model): parent = models.ForeignKey('Project', verbose_name=_('Parent'), related_name='subprojects') child = models.ForeignKey('Project', verbose_name=_('Child'), related_name='superprojects') def __unicode__(self): return "%s -> %s" % (self.parent, self.child) # HACK def get_absolute_url(self): return ("http://%s.readthedocs.org/projects/%s/%s/latest/" % (self.parent.slug, self.child.slug, self.child.language)) class Project(models.Model): # Auto fields pub_date = models.DateTimeField(_('Publication date'), auto_now_add=True) modified_date = models.DateTimeField(_('Modified date'), auto_now=True) # Generally from conf.py users = models.ManyToManyField(User, verbose_name=_('User'), related_name='projects') name = models.CharField(_('Name'), max_length=255) slug = models.SlugField(_('Slug'), max_length=255, unique=True) description = models.TextField(_('Description'), blank=True, help_text=_('The reStructuredText ' 'description of the project')) repo = models.CharField(_('Repository URL'), max_length=255, help_text=_('Hosted documentation repository URL')) repo_type = models.CharField(_('Repository type'), max_length=10, choices=constants.REPO_CHOICES, default='git') project_url = models.URLField(_('Project homepage'), blank=True, help_text=_('The project\'s homepage')) canonical_url = models.URLField(_('Canonical URL'), blank=True, help_text=_('URL that documentation is expected to serve from')) version = models.CharField(_('Version'), max_length=100, blank=True, help_text=_('Project version these docs apply ' 'to, i.e. 1.0a')) copyright = models.CharField(_('Copyright'), max_length=255, blank=True, help_text=_('Project copyright information')) theme = models.CharField( _('Theme'), max_length=20, choices=constants.DEFAULT_THEME_CHOICES, default=constants.THEME_DEFAULT, help_text=(u'<a href="http://sphinx.pocoo.org/theming.html#builtin-' 'themes" target="_blank">%s</a>') % _('Examples')) suffix = models.CharField(_('Suffix'), max_length=10, editable=False, default='.rst') single_version = models.BooleanField( _('Single version'), default=False, help_text=_('A single version site has no translations and only your "latest" version, served at the root of the domain. 
Use this with caution, only turn it on if you will <b>never</b> have multiple versions of your docs.')) default_version = models.CharField( _('Default version'), max_length=255, default=LATEST, help_text=_('The version of your project that / redirects to')) # In default_branch, None max_lengtheans the backend should choose the # appropraite branch. Eg 'master' for git default_branch = models.CharField( _('Default branch'), max_length=255, default=None, null=True, blank=True, help_text=_('What branch "latest" points to. Leave empty ' 'to use the default value for your VCS (eg. ' '<code>trunk</code> or <code>master</code>).')) requirements_file = models.CharField( _('Requirements file'), max_length=255, default=None, null=True, blank=True, help_text=_( 'Requires Virtualenv. A <a ' 'href="https://pip.pypa.io/en/latest/user_guide.html#requirements-files">' 'pip requirements file</a> needed to build your documentation. ' 'Path from the root of your project.')) documentation_type = models.CharField( _('Documentation type'), max_length=20, choices=constants.DOCUMENTATION_CHOICES, default='auto', help_text=_('Type of documentation you are building. <a href="http://' 'sphinx-doc.org/builders.html#sphinx.builders.html.' 'DirectoryHTMLBuilder">More info</a>.')) allow_comments = models.BooleanField(_('Allow Comments'), default=False) comment_moderation = models.BooleanField(_('Comment Moderation)'), default=False) analytics_code = models.CharField( _('Analytics code'), max_length=50, null=True, blank=True, help_text=_("Google Analytics Tracking ID " "(ex. <code>UA-22345342-1</code>). " "This may slow down your page loads.")) # Sphinx specific build options. enable_epub_build = models.BooleanField( _('Enable EPUB build'), default=True, help_text=_( 'Create a EPUB version of your documentation with each build.')) enable_pdf_build = models.BooleanField( _('Enable PDF build'), default=True, help_text=_( 'Create a PDF version of your documentation with each build.')) # Other model data. path = models.CharField(_('Path'), max_length=255, editable=False, help_text=_("The directory where " "<code>conf.py</code> lives")) conf_py_file = models.CharField( _('Python configuration file'), max_length=255, default='', blank=True, help_text=_('Path from project root to <code>conf.py</code> file ' '(ex. <code>docs/conf.py</code>).' 'Leave blank if you want us to find it for you.')) featured = models.BooleanField(_('Featured'), default=False) skip = models.BooleanField(_('Skip'), default=False) mirror = models.BooleanField(_('Mirror'), default=False) use_virtualenv = models.BooleanField( _('Use virtualenv'), help_text=_("Install your project inside a virtualenv using <code>setup.py " "install</code>"), default=False ) # This model attribute holds the python interpreter used to create the # virtual environment python_interpreter = models.CharField( _('Python Interpreter'), max_length=20, choices=constants.PYTHON_CHOICES, default='python', help_text=_("(Beta) The Python interpreter used to create the virtual " "environment.")) use_system_packages = models.BooleanField( _('Use system packages'), help_text=_("Give the virtual environment access to the global " "site-packages dir."), default=False ) django_packages_url = models.CharField(_('Django Packages URL'), max_length=255, blank=True) privacy_level = models.CharField( _('Privacy Level'), max_length=20, choices=constants.PRIVACY_CHOICES, default=getattr(settings, 'DEFAULT_PRIVACY_LEVEL', 'public'), help_text=_("(Beta) Level of privacy that you want on the repository. 
" "Protected means public but not in listings.")) version_privacy_level = models.CharField( _('Version Privacy Level'), max_length=20, choices=constants.PRIVACY_CHOICES, default=getattr( settings, 'DEFAULT_PRIVACY_LEVEL', 'public'), help_text=_("(Beta) Default level of privacy you want on built " "versions of documentation.")) # Subprojects related_projects = models.ManyToManyField( 'self', verbose_name=_('Related projects'), blank=True, null=True, symmetrical=False, through=ProjectRelationship) # Language bits language = models.CharField(_('Language'), max_length=20, default='en', help_text=_("The language the project " "documentation is rendered in. " "Note: this affects your project's URL."), choices=constants.LANGUAGES) programming_language = models.CharField(_('Programming Language'), max_length=20, default='words', help_text=_( "The primary programming language the project is written in."), choices=constants.PROGRAMMING_LANGUAGES, blank=True) # A subproject pointed at it's main language, so it can be tracked main_language_project = models.ForeignKey('self', related_name='translations', blank=True, null=True) # Version State num_major = models.IntegerField( _('Number of Major versions'), max_length=3, default=2, null=True, blank=True, help_text=_("2 means supporting 3.X.X and 2.X.X, but not 1.X.X") ) num_minor = models.IntegerField( _('Number of Minor versions'), max_length=3, default=2, null=True, blank=True, help_text=_("2 means supporting 2.2.X and 2.1.X, but not 2.0.X") ) num_point = models.IntegerField( _('Number of Point versions'), max_length=3, default=2, null=True, blank=True, help_text=_("2 means supporting 2.2.2 and 2.2.1, but not 2.2.0") ) tags = TaggableManager(blank=True) objects = ProjectManager() all_objects = models.Manager() class Meta: ordering = ('slug',) permissions = ( # Translators: Permission around whether a user can view the # project ('view_project', _('View Project')), ) def __unicode__(self): return self.name @property def subdomain(self): prod_domain = getattr(settings, 'PRODUCTION_DOMAIN') # if self.canonical_domain: # return self.canonical_domain # else: subdomain_slug = self.slug.replace('_', '-') return "%s.%s" % (subdomain_slug, prod_domain) def sync_supported_versions(self): supported = self.supported_versions(flat=True) if supported: self.versions.filter( verbose_name__in=supported).update(supported=True) self.versions.exclude( verbose_name__in=supported).update(supported=False) self.versions.filter(verbose_name=LATEST_VERBOSE_NAME).update(supported=True) def save(self, *args, **kwargs): first_save = self.pk is None if not self.slug: # Subdomains can't have underscores in them. 
            self.slug = slugify(self.name).replace('_', '-')
        if self.slug == '':
            raise Exception(_("Model must have slug"))
        super(Project, self).save(*args, **kwargs)
        for owner in self.users.all():
            assign('view_project', owner, self)
        try:
            if self.default_branch:
                latest = self.versions.get(slug=LATEST)
                if latest.identifier != self.default_branch:
                    latest.identifier = self.default_branch
                    latest.save()
        except Exception:
            log.error('Failed to update latest identifier', exc_info=True)

        # Add exceptions here for safety
        try:
            self.sync_supported_versions()
        except Exception:
            log.error('failed to sync supported versions', exc_info=True)
        try:
            if not first_save:
                symlink(project=self.slug)
        except Exception:
            log.error('failed to symlink project', exc_info=True)
        try:
            update_static_metadata(project_pk=self.pk)
        except Exception:
            log.error('failed to update static metadata', exc_info=True)
        try:
            branch = self.default_branch or self.vcs_repo().fallback_branch
            if not self.versions.filter(slug=LATEST).exists():
                self.versions.create(
                    slug=LATEST, verbose_name=LATEST_VERBOSE_NAME,
                    machine=True, type='branch', active=True,
                    identifier=branch)
            # if not self.versions.filter(slug=STABLE).exists():
            #     self.versions.create_stable(type='branch',
            #                                 identifier=branch)
        except Exception:
            log.error('Error creating default branches', exc_info=True)

    def get_absolute_url(self):
        return reverse('projects_detail', args=[self.slug])

    def get_docs_url(self, version_slug=None, lang_slug=None):
        """
        Return a url for the docs.

        Always use http for now, to avoid content warnings.
        """
        protocol = "http"
        version = version_slug or self.get_default_version()
        lang = lang_slug or self.language
        use_subdomain = getattr(settings, 'USE_SUBDOMAIN', False)
        if use_subdomain:
            if self.single_version:
                return "%s://%s/" % (
                    protocol,
                    self.subdomain,
                )
            else:
                return "%s://%s/%s/%s/" % (
                    protocol,
                    self.subdomain,
                    lang,
                    version,
                )
        else:
            if self.single_version:
                return reverse('docs_detail', kwargs={
                    'project_slug': self.slug,
                    'filename': ''
                })
            else:
                return reverse('docs_detail', kwargs={
                    'project_slug': self.slug,
                    'lang_slug': lang,
                    'version_slug': version,
                    'filename': ''
                })

    def get_translation_url(self, version_slug=None):
        parent = self.main_language_project
        lang_slug = self.language
        protocol = "http"
        version = version_slug or parent.get_default_version()
        use_subdomain = getattr(settings, 'USE_SUBDOMAIN', False)
        if use_subdomain:
            return "%s://%s/%s/%s/" % (
                protocol,
                parent.subdomain,
                lang_slug,
                version,
            )
        else:
            return reverse('docs_detail', kwargs={
                'project_slug': parent.slug,
                'lang_slug': lang_slug,
                'version_slug': version,
                'filename': ''
            })

    def get_builds_url(self):
        return reverse('builds_project_list', kwargs={
            'project_slug': self.slug,
        })

    def get_production_media_path(self, type, version_slug,
                                  include_file=True):
        """
        Get file path for media files in production.

        This is used to see if these files exist so we can offer them for
        download.
        """
        if getattr(settings, 'DEFAULT_PRIVACY_LEVEL', 'public') == 'public':
            path = os.path.join(
                settings.MEDIA_ROOT, type, self.slug, version_slug)
        else:
            path = os.path.join(
                settings.PRODUCTION_MEDIA_ARTIFACTS, type, self.slug,
                version_slug)
        if include_file:
            path = os.path.join(
                path, '%s.%s' % (self.slug, type.replace('htmlzip', 'zip')))
        return path

    def get_production_media_url(self, type, version_slug, full_path=True):
        """
        Get the URL for downloading a specific media file.
        """
        path = reverse('project_download_media', kwargs={
            'project_slug': self.slug,
            'type': type,
            'version_slug': version_slug,
        })
        if full_path:
            path = '//%s%s' % (settings.PRODUCTION_DOMAIN, path)
        return path

    def get_downloads(self):
        downloads = {}
        downloads['htmlzip'] = self.get_production_media_url(
            'htmlzip', self.get_default_version())
        downloads['epub'] = self.get_production_media_url(
            'epub', self.get_default_version())
        downloads['pdf'] = self.get_production_media_url(
            'pdf', self.get_default_version())
        return downloads

    @property
    def canonical_domain(self):
        if not self.clean_canonical_url:
            return ""
        return urlparse(self.clean_canonical_url).netloc

    @property
    def clean_canonical_url(self):
        if not self.canonical_url:
            return ""
        parsed = urlparse(self.canonical_url)
        if parsed.scheme:
            scheme, netloc = parsed.scheme, parsed.netloc
        elif parsed.netloc:
            scheme, netloc = "http", parsed.netloc
        else:
            scheme, netloc = "http", parsed.path
        if getattr(settings, 'DONT_HIT_DB', True):
            if parsed.path:
                netloc = netloc + parsed.path
        else:
            if self.superprojects.count() and parsed.path:
                netloc = netloc + parsed.path
        return "%s://%s/" % (scheme, netloc)

    @property
    def clean_repo(self):
        if self.repo.startswith('http://github.com'):
            return self.repo.replace('http://github.com',
                                     'https://github.com')
        return self.repo

    # Doc PATH:
    # MEDIA_ROOT/slug/checkouts/version/<repo>

    @property
    def doc_path(self):
        return os.path.join(settings.DOCROOT, self.slug.replace('_', '-'))

    def checkout_path(self, version=LATEST):
        return os.path.join(self.doc_path, 'checkouts', version)

    def venv_path(self, version=LATEST):
        return os.path.join(self.doc_path, 'envs', version)

    #
    # Paths for symlinks in project doc_path.
    #
    def cnames_symlink_path(self, domain):
        """
        Path in the doc_path that we symlink cnames

        This has to be at the top-level because Nginx doesn't know the
        projects slug.
        """
        return os.path.join(settings.CNAME_ROOT, domain)

    def translations_symlink_path(self, language=None):
        """
        Path in the doc_path that we symlink translations
        """
        if not language:
            language = self.language
        return os.path.join(self.doc_path, 'translations', language)

    def subprojects_symlink_path(self, project):
        """
        Path in the doc_path that we symlink subprojects
        """
        return os.path.join(self.doc_path, 'subprojects', project)

    def single_version_symlink_path(self):
        """
        Path in the doc_path for the single_version symlink.
        """
        return os.path.join(self.doc_path, 'single_version')

    #
    # End symlink paths
    #

    def venv_bin(self, version=LATEST, bin='python'):
        return os.path.join(self.venv_path(version), 'bin', bin)

    def full_doc_path(self, version=LATEST):
        """
        The path to the documentation root in the project.
        """
        doc_base = self.checkout_path(version)
        for possible_path in ['docs', 'doc', 'Doc']:
            if os.path.exists(os.path.join(doc_base, '%s' % possible_path)):
                return os.path.join(doc_base, '%s' % possible_path)
        # No docs directory, docs are at top-level.
        return doc_base

    def artifact_path(self, type, version=LATEST):
        """
        The path to the build html docs in the project.
        """
        return os.path.join(self.doc_path, "artifacts", version, type)

    def full_build_path(self, version=LATEST):
        """
        The path to the build html docs in the project.
        """
        return os.path.join(self.conf_dir(version), "_build", "html")

    def full_latex_path(self, version=LATEST):
        """
        The path to the build LaTeX docs in the project.
        """
        return os.path.join(self.conf_dir(version), "_build", "latex")

    def full_epub_path(self, version=LATEST):
        """
        The path to the build epub docs in the project.
        """
        return os.path.join(self.conf_dir(version), "_build", "epub")

    # There is currently no support for building man/dash formats, but we
    # keep the support there for existing projects. They might have already
    # existing legacy builds.

    def full_man_path(self, version=LATEST):
        """
        The path to the build man docs in the project.
        """
        return os.path.join(self.conf_dir(version), "_build", "man")

    def full_dash_path(self, version=LATEST):
        """
        The path to the build dash docs in the project.
        """
        return os.path.join(self.conf_dir(version), "_build", "dash")

    def full_json_path(self, version=LATEST):
        """
        The path to the build json docs in the project.
        """
        if 'sphinx' in self.documentation_type:
            return os.path.join(self.conf_dir(version), "_build", "json")
        elif 'mkdocs' in self.documentation_type:
            return os.path.join(self.checkout_path(version), "_build",
                                "json")

    def full_singlehtml_path(self, version=LATEST):
        """
        The path to the build singlehtml docs in the project.
        """
        return os.path.join(self.conf_dir(version), "_build", "singlehtml")

    def rtd_build_path(self, version=LATEST):
        """
        The destination path where the built docs are copied.
        """
        return os.path.join(self.doc_path, 'rtd-builds', version)

    def static_metadata_path(self):
        """
        The path to the static metadata JSON settings file
        """
        return os.path.join(self.doc_path, 'metadata.json')

    def conf_file(self, version=LATEST):
        if self.conf_py_file:
            conf_path = os.path.join(self.checkout_path(version),
                                     self.conf_py_file)
            if os.path.exists(conf_path):
                log.info('Inserting conf.py file path from model')
                return conf_path
            else:
                log.warning("Conf file specified on model doesn't exist")
        files = self.find('conf.py', version)
        print files
        if not files:
            files = self.full_find('conf.py', version)
            print files
        if len(files) == 1:
            return files[0]
        for file in files:
            if file.find('doc', 70) != -1:
                return file
        # Having this be translatable causes this odd error:
        # ProjectImportError(<django.utils.functional.__proxy__ object at
        # 0x1090cded0>,)
        raise ProjectImportError(
            u"Conf File Missing. 
Please make sure you have a conf.py in your project.") def conf_dir(self, version=LATEST): conf_file = self.conf_file(version) if conf_file: return conf_file.replace('/conf.py', '') @property def highest_version(self): return _highest(self.api_versions()) @property def is_imported(self): return bool(self.repo) @property def has_good_build(self): return self.builds.filter(success=True).exists() @property def has_versions(self): return self.versions.exists() @property def has_aliases(self): return self.aliases.exists() def has_pdf(self, version_slug=LATEST): return os.path.exists(self.get_production_media_path(type='pdf', version_slug=version_slug)) def has_epub(self, version_slug=LATEST): return os.path.exists(self.get_production_media_path(type='epub', version_slug=version_slug)) def has_htmlzip(self, version_slug=LATEST): return os.path.exists(self.get_production_media_path(type='htmlzip', version_slug=version_slug)) @property def sponsored(self): return False def vcs_repo(self, version=LATEST): backend = backend_cls.get(self.repo_type) if not backend: repo = None else: proj = VCSProject( self.name, self.default_branch, self.checkout_path(version), self.clean_repo) repo = backend(proj, version) return repo @property def contribution_backend(self): if hasattr(self, '_contribution_backend'): return self._contribution_backend if not self.vcs_repo: cb = None else: cb = self.vcs_repo.get_contribution_backend() self._contribution_backend = cb return cb def repo_nonblockinglock(self, version, max_lock_age=5): return NonBlockingLock(project=self, version=version, max_lock_age=max_lock_age) def repo_lock(self, version, timeout=5, polling_interval=5): return Lock(self, version, timeout, polling_interval) def find(self, file, version): """ A balla API to find files inside of a projects dir. """ matches = [] for root, dirnames, filenames in os.walk(self.full_doc_path(version)): for filename in fnmatch.filter(filenames, file): matches.append(os.path.join(root, filename)) return matches def full_find(self, file, version): """ A balla API to find files inside of a projects dir. """ matches = [] for root, dirnames, filenames in os.walk(self.checkout_path(version)): for filename in fnmatch.filter(filenames, file): matches.append(os.path.join(root, filename)) return matches def get_latest_build(self, finished=True): """ Get latest build for project finished Return only builds that are in a finished state """ kwargs = {'type': 'html'} if finished: kwargs['state'] = 'finished' return self.builds.filter(**kwargs).first() def api_versions(self): ret = [] for version_data in api.version.get(project=self.pk, active=True)['objects']: version = make_api_version(version_data) ret.append(version) return sort_version_aware(ret) def active_versions(self): from builds.models import Version versions = Version.objects.public(project=self, only_active=True) return (versions.filter(built=True, active=True) | versions.filter(active=True, uploaded=True)) def ordered_active_versions(self): from builds.models import Version versions = Version.objects.public(project=self, only_active=True) return sort_version_aware(versions) def all_active_versions(self): """A temporary workaround for active_versions filtering out things that were active, but failed to build """ return self.versions.filter(active=True) def supported_versions(self, flat=True): """ Get the list of supported versions. Returns a list of version strings. 
""" if not self.num_major or not self.num_minor or not self.num_point: return None version_identifiers = [] for version in self.versions.all(): try: version_identifiers.append(VersionIdentifier(version.verbose_name)) except UnsupportedVersionError: # Probably a branch pass active_versions = version_windows( version_identifiers, major=self.num_major, minor=self.num_minor, point=self.num_point, flat=flat, ) version_strings = [v._string for v in active_versions] return version_strings def version_from_branch_name(self, branch): try: return ( self.versions.filter(identifier=branch) | self.versions.filter(identifier=('remotes/origin/%s' % branch)) | self.versions.filter(identifier=('origin/%s' % branch)) )[0] except IndexError: return None def versions_from_branch_name(self, branch): return ( self.versions.filter(identifier=branch) | self.versions.filter(identifier='remotes/origin/%s' % branch) | self.versions.filter(identifier='origin/%s' % branch) ) def get_default_version(self): """ Get the default version (slug). Returns self.default_version if the version with that slug actually exists (is built and published). Otherwise returns 'latest'. """ # latest is a special case where we don't have to check if it exists if self.default_version == LATEST: return self.default_version # check if the default_version exists version_qs = self.versions.filter( slug=self.default_version, active=True ) if version_qs.exists(): return self.default_version return LATEST def get_default_branch(self): """ Get the version representing "latest" """ if self.default_branch: return self.default_branch else: return self.vcs_repo().fallback_branch def add_subproject(self, child): subproject, created = ProjectRelationship.objects.get_or_create( parent=self, child=child, ) return subproject def remove_subproject(self, child): ProjectRelationship.objects.filter(parent=self, child=child).delete() return def moderation_queue(self): # non-optimal SQL warning. from comments.models import DocumentComment queue = [] comments = DocumentComment.objects.filter(node__project=self) for comment in comments: if not comment.has_been_approved_since_most_recent_node_change(): queue.append(comment) return queue def add_node(self, node_hash, page, version, commit): from comments.models import NodeSnapshot, DocumentNode project_obj = Project.objects.get(slug=self.slug) version_obj = project_obj.versions.get(slug=version) try: NodeSnapshot.objects.get(hash=node_hash, node__project=project_obj, node__version=version_obj, node__page=page, commit=commit) return False # ie, no new node was created. except NodeSnapshot.DoesNotExist: DocumentNode.objects.create( hash=node_hash, page=page, project=project_obj, version=version_obj, commit=commit ) return True # ie, it's True that a new node was created. 
def add_comment(self, version_slug, page, hash, commit, user, text): from comments.models import DocumentNode try: node = self.nodes.from_hash(version_slug, page, hash) except DocumentNode.DoesNotExist: version = self.versions.get(slug=version_slug) node = self.nodes.create(version=version, page=page, hash=hash, commit=commit) return node.comments.create(user=user, text=text) class ImportedFile(models.Model): project = models.ForeignKey('Project', verbose_name=_('Project'), related_name='imported_files') version = models.ForeignKey('builds.Version', verbose_name=_('Version'), related_name='imported_files', null=True) name = models.CharField(_('Name'), max_length=255) slug = models.SlugField(_('Slug')) path = models.CharField(_('Path'), max_length=255) md5 = models.CharField(_('MD5 checksum'), max_length=255) commit = models.CharField(_('Commit'), max_length=255) @models.permalink def get_absolute_url(self): return ('docs_detail', [self.project.slug, self.project.language, self.version.slug, self.path]) def __unicode__(self): return '%s: %s' % (self.name, self.project) class Notification(models.Model): project = models.ForeignKey(Project, related_name='%(class)s_notifications') objects = RelatedProjectManager() class Meta: abstract = True class EmailHook(Notification): email = models.EmailField() def __unicode__(self): return self.email class WebHook(Notification): url = models.URLField(blank=True, help_text=_('URL to send the webhook to')) def __unicode__(self): return self.url
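# --- Illustration only; not part of the models above ---
# A minimal, self-contained sketch of the "version window" semantics that the
# num_major/num_minor/num_point help texts describe (the real logic lives in
# the `version_windows` helper used by supported_versions, which is imported
# from elsewhere and not shown here). This toy version only handles plain
# X.Y.Z strings.
def _toy_version_window(versions, major=2, minor=2, point=2):
    parsed = sorted(set(tuple(int(p) for p in v.split('.')) for v in versions),
                    reverse=True)
    keep = []
    for ma in sorted(set(v[0] for v in parsed), reverse=True)[:major]:
        for mi in sorted(set(v[1] for v in parsed if v[0] == ma),
                         reverse=True)[:minor]:
            for pt in sorted(set(v[2] for v in parsed if v[:2] == (ma, mi)),
                             reverse=True)[:point]:
                keep.append('%d.%d.%d' % (ma, mi, pt))
    return keep

# _toy_version_window(['1.0.0', '2.0.0', '2.1.0', '3.0.0', '3.0.1', '3.1.1'])
# -> ['3.1.1', '3.0.1', '3.0.0', '2.1.0', '2.0.0']; 1.X.X falls outside the
# two-major window, matching "2 means supporting 3.X.X and 2.X.X".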
mit
unreal666/outwiker
plugins/thumbgallery/thumbgallery/thumbtablegenerator.py
3
3320
# -*- coding: utf-8 -*-

from outwiker.core.attachment import Attachment
from outwiker.core.defines import PAGE_ATTACH_DIR

from .basethumbgenerator import BaseThumbGenerator


class ThumbTableGenerator (BaseThumbGenerator):
    """
    Create a list of thumbnails rendered as a table
    """
    def __init__(self, items, thumbsize, parser, cols):
        """
        items - list of tuples describing the attached files the thumbnails
            are made from (first element) and their comments (second element)
        thumbsize - thumbnail size (along the longest side)
        parser - wiki parser instance (Parser)
        cols - number of table columns
        """
        super().__init__(items, thumbsize, parser)
        self._cols = cols

        # Wrapper for the gallery as a whole
        self._fullTemplate = u'<table class="thumblist-table">{content}</table>'

        self._rowTemplate = u'<tr class="thumblist-row">{row}</tr>'

        self._singleThumbTemplate = u'<td class="thumblist-td"><div class="thumblist-table-item"><div class="thumblist-table-image">{thumbimage}</div><div class="thumblist-table-comment">{comment}</div></div></td>'

        self._style = """<!-- Begin Thumblist styles -->
<style>
    table.thumblist-table { border: 1px solid #DDD; }
    div.thumblist-table-item{ padding: 1em; }
    td.thumblist-td { border: 1px solid #DDD; text-align: center; }
    div.thumblist-table-image{ text-align: center; }
    div.thumblist-table-comment{ text-align: center; height: 100%; }
</style>
<!-- End Thumblist styles -->"""

    def generate(self):
        """
        Return a string containing the HTML markup of the gallery
        """
        if self._style not in self._parser.head:
            self._parser.appendToHead(self._style)

        resultContent = self._generateRows(self._items)
        return self._fullTemplate.format(content=resultContent)

    def _generateItemText(self, item):
        """
        Return a rendered table cell for a single item
        """
        image = u"""<A HREF="{attachdir}/{imagename}"><IMG SRC="{thumbpath}"/></A>""".format(
            attachdir=PAGE_ATTACH_DIR,
            imagename=item[0],
            thumbpath=self._getThumbnail(self._parser.page, item[0]))

        return self._singleThumbTemplate.format(thumbimage=image,
                                                comment=item[1])

    def _generateRows(self, items):
        """
        Return the HTML markup for the table rows
        """
        itemsText = [self._generateItemText(item) for item in self._items]

        # Split the list into several lists of length self._cols
        splitItems = [itemsText[i: i + self._cols]
                      for i in range(0, len(itemsText), self._cols)]

        rows = [self._rowTemplate.format(row=u"".join(row))
                for row in splitItems]

        return u"".join(rows)
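# Illustration only: a self-contained sketch of the row-splitting step used in
# _generateRows above - slicing a flat list of rendered cells into rows of at
# most `cols` items. The cell strings are placeholders, not real gallery HTML.
def _demo_split_rows():
    cells = ['cell{}'.format(n) for n in range(1, 8)]
    cols = 3
    rows = [cells[i: i + cols] for i in range(0, len(cells), cols)]
    assert rows == [['cell1', 'cell2', 'cell3'],
                    ['cell4', 'cell5', 'cell6'],
                    ['cell7']]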
gpl-3.0
Hazelsuko07/17WarmingUp
py3.6/lib/python3.6/site-packages/click/decorators.py
204
10941
import sys import inspect from functools import update_wrapper from ._compat import iteritems from ._unicodefun import _check_for_unicode_literals from .utils import echo from .globals import get_current_context def pass_context(f): """Marks a callback as wanting to receive the current context object as first argument. """ def new_func(*args, **kwargs): return f(get_current_context(), *args, **kwargs) return update_wrapper(new_func, f) def pass_obj(f): """Similar to :func:`pass_context`, but only pass the object on the context onwards (:attr:`Context.obj`). This is useful if that object represents the state of a nested system. """ def new_func(*args, **kwargs): return f(get_current_context().obj, *args, **kwargs) return update_wrapper(new_func, f) def make_pass_decorator(object_type, ensure=False): """Given an object type this creates a decorator that will work similar to :func:`pass_obj` but instead of passing the object of the current context, it will find the innermost context of type :func:`object_type`. This generates a decorator that works roughly like this:: from functools import update_wrapper def decorator(f): @pass_context def new_func(ctx, *args, **kwargs): obj = ctx.find_object(object_type) return ctx.invoke(f, obj, *args, **kwargs) return update_wrapper(new_func, f) return decorator :param object_type: the type of the object to pass. :param ensure: if set to `True`, a new object will be created and remembered on the context if it's not there yet. """ def decorator(f): def new_func(*args, **kwargs): ctx = get_current_context() if ensure: obj = ctx.ensure_object(object_type) else: obj = ctx.find_object(object_type) if obj is None: raise RuntimeError('Managed to invoke callback without a ' 'context object of type %r existing' % object_type.__name__) return ctx.invoke(f, obj, *args[1:], **kwargs) return update_wrapper(new_func, f) return decorator def _make_command(f, name, attrs, cls): if isinstance(f, Command): raise TypeError('Attempted to convert a callback into a ' 'command twice.') try: params = f.__click_params__ params.reverse() del f.__click_params__ except AttributeError: params = [] help = attrs.get('help') if help is None: help = inspect.getdoc(f) if isinstance(help, bytes): help = help.decode('utf-8') else: help = inspect.cleandoc(help) attrs['help'] = help _check_for_unicode_literals() return cls(name=name or f.__name__.lower(), callback=f, params=params, **attrs) def command(name=None, cls=None, **attrs): """Creates a new :class:`Command` and uses the decorated function as callback. This will also automatically attach all decorated :func:`option`\s and :func:`argument`\s as parameters to the command. The name of the command defaults to the name of the function. If you want to change that, you can pass the intended name as the first argument. All keyword arguments are forwarded to the underlying command class. Once decorated the function turns into a :class:`Command` instance that can be invoked as a command line utility or be attached to a command :class:`Group`. :param name: the name of the command. This defaults to the function name. :param cls: the command class to instantiate. This defaults to :class:`Command`. """ if cls is None: cls = Command def decorator(f): cmd = _make_command(f, name, attrs, cls) cmd.__doc__ = f.__doc__ return cmd return decorator def group(name=None, **attrs): """Creates a new :class:`Group` with a function as callback. This works otherwise the same as :func:`command` just that the `cls` parameter is set to :class:`Group`. 
""" attrs.setdefault('cls', Group) return command(name, **attrs) def _param_memo(f, param): if isinstance(f, Command): f.params.append(param) else: if not hasattr(f, '__click_params__'): f.__click_params__ = [] f.__click_params__.append(param) def argument(*param_decls, **attrs): """Attaches an argument to the command. All positional arguments are passed as parameter declarations to :class:`Argument`; all keyword arguments are forwarded unchanged (except ``cls``). This is equivalent to creating an :class:`Argument` instance manually and attaching it to the :attr:`Command.params` list. :param cls: the argument class to instantiate. This defaults to :class:`Argument`. """ def decorator(f): ArgumentClass = attrs.pop('cls', Argument) _param_memo(f, ArgumentClass(param_decls, **attrs)) return f return decorator def option(*param_decls, **attrs): """Attaches an option to the command. All positional arguments are passed as parameter declarations to :class:`Option`; all keyword arguments are forwarded unchanged (except ``cls``). This is equivalent to creating an :class:`Option` instance manually and attaching it to the :attr:`Command.params` list. :param cls: the option class to instantiate. This defaults to :class:`Option`. """ def decorator(f): if 'help' in attrs: attrs['help'] = inspect.cleandoc(attrs['help']) OptionClass = attrs.pop('cls', Option) _param_memo(f, OptionClass(param_decls, **attrs)) return f return decorator def confirmation_option(*param_decls, **attrs): """Shortcut for confirmation prompts that can be ignored by passing ``--yes`` as parameter. This is equivalent to decorating a function with :func:`option` with the following parameters:: def callback(ctx, param, value): if not value: ctx.abort() @click.command() @click.option('--yes', is_flag=True, callback=callback, expose_value=False, prompt='Do you want to continue?') def dropdb(): pass """ def decorator(f): def callback(ctx, param, value): if not value: ctx.abort() attrs.setdefault('is_flag', True) attrs.setdefault('callback', callback) attrs.setdefault('expose_value', False) attrs.setdefault('prompt', 'Do you want to continue?') attrs.setdefault('help', 'Confirm the action without prompting.') return option(*(param_decls or ('--yes',)), **attrs)(f) return decorator def password_option(*param_decls, **attrs): """Shortcut for password prompts. This is equivalent to decorating a function with :func:`option` with the following parameters:: @click.command() @click.option('--password', prompt=True, confirmation_prompt=True, hide_input=True) def changeadmin(password): pass """ def decorator(f): attrs.setdefault('prompt', True) attrs.setdefault('confirmation_prompt', True) attrs.setdefault('hide_input', True) return option(*(param_decls or ('--password',)), **attrs)(f) return decorator def version_option(version=None, *param_decls, **attrs): """Adds a ``--version`` option which immediately ends the program printing out the version number. This is implemented as an eager option that prints the version and exits the program in the callback. :param version: the version number to show. If not provided Click attempts an auto discovery via setuptools. :param prog_name: the name of the program (defaults to autodetection) :param message: custom message to show instead of the default (``'%(prog)s, version %(version)s'``) :param others: everything else is forwarded to :func:`option`. 
""" if version is None: module = sys._getframe(1).f_globals.get('__name__') def decorator(f): prog_name = attrs.pop('prog_name', None) message = attrs.pop('message', '%(prog)s, version %(version)s') def callback(ctx, param, value): if not value or ctx.resilient_parsing: return prog = prog_name if prog is None: prog = ctx.find_root().info_name ver = version if ver is None: try: import pkg_resources except ImportError: pass else: for dist in pkg_resources.working_set: scripts = dist.get_entry_map().get('console_scripts') or {} for script_name, entry_point in iteritems(scripts): if entry_point.module_name == module: ver = dist.version break if ver is None: raise RuntimeError('Could not determine version') echo(message % { 'prog': prog, 'version': ver, }, color=ctx.color) ctx.exit() attrs.setdefault('is_flag', True) attrs.setdefault('expose_value', False) attrs.setdefault('is_eager', True) attrs.setdefault('help', 'Show the version and exit.') attrs['callback'] = callback return option(*(param_decls or ('--version',)), **attrs)(f) return decorator def help_option(*param_decls, **attrs): """Adds a ``--help`` option which immediately ends the program printing out the help page. This is usually unnecessary to add as this is added by default to all commands unless suppressed. Like :func:`version_option`, this is implemented as eager option that prints in the callback and exits. All arguments are forwarded to :func:`option`. """ def decorator(f): def callback(ctx, param, value): if value and not ctx.resilient_parsing: echo(ctx.get_help(), color=ctx.color) ctx.exit() attrs.setdefault('is_flag', True) attrs.setdefault('expose_value', False) attrs.setdefault('help', 'Show this message and exit.') attrs.setdefault('is_eager', True) attrs['callback'] = callback return option(*(param_decls or ('--help',)), **attrs)(f) return decorator # Circular dependencies between core and decorators from .core import Command, Group, Argument, Option
mit
tiagofrepereira2012/tensorflow
tensorflow/python/ops/candidate_sampling_ops.py
55
17656
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Wrappers for candidate sampling operations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import random_seed from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_candidate_sampling_ops from tensorflow.python.ops import math_ops def uniform_candidate_sampler(true_classes, num_true, num_sampled, unique, range_max, seed=None, name=None): """Samples a set of classes using a uniform base distribution. This operation randomly samples a tensor of sampled classes (`sampled_candidates`) from the range of integers `[0, range_max)`. The elements of `sampled_candidates` are drawn without replacement (if `unique=True`) or with replacement (if `unique=False`) from the base distribution. The base distribution for this operation is the uniform distribution over the range of integers `[0, range_max)`. In addition, this operation returns tensors `true_expected_count` and `sampled_expected_count` representing the number of times each of the target classes (`true_classes`) and the sampled classes (`sampled_candidates`) is expected to occur in an average tensor of sampled classes. These values correspond to `Q(y|x)` defined in [this document](http://www.tensorflow.org/extras/candidate_sampling.pdf). If `unique=True`, then these are post-rejection probabilities and we compute them approximately. Args: true_classes: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. num_true: An `int`. The number of target classes per training example. num_sampled: An `int`. The number of classes to randomly sample. The `sampled_candidates` return value will have shape `[num_sampled]`. If `unique=True`, `num_sampled` must be less than or equal to `range_max`. unique: A `bool`. Determines whether all sampled classes in a batch are unique. range_max: An `int`. The number of possible classes. seed: An `int`. An operation-specific seed. Default is 0. name: A name for the operation (optional). Returns: sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`. The sampled classes, either with possible duplicates (`unique=False`) or all unique (`unique=True`). In either case, `sampled_candidates` is independent of the true classes. true_expected_count: A tensor of type `float`. Same shape as `true_classes`. The expected counts under the sampling distribution of each of `true_classes`. sampled_expected_count: A tensor of type `float`. Same shape as `sampled_candidates`. The expected counts under the sampling distribution of each of `sampled_candidates`. 
""" seed1, seed2 = random_seed.get_seed(seed) return gen_candidate_sampling_ops._uniform_candidate_sampler( true_classes, num_true, num_sampled, unique, range_max, seed=seed1, seed2=seed2, name=name) def log_uniform_candidate_sampler(true_classes, num_true, num_sampled, unique, range_max, seed=None, name=None): """Samples a set of classes using a log-uniform (Zipfian) base distribution. This operation randomly samples a tensor of sampled classes (`sampled_candidates`) from the range of integers `[0, range_max)`. The elements of `sampled_candidates` are drawn without replacement (if `unique=True`) or with replacement (if `unique=False`) from the base distribution. The base distribution for this operation is an approximately log-uniform or Zipfian distribution: `P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)` This sampler is useful when the target classes approximately follow such a distribution - for example, if the classes represent words in a lexicon sorted in decreasing order of frequency. If your classes are not ordered by decreasing frequency, do not use this op. In addition, this operation returns tensors `true_expected_count` and `sampled_expected_count` representing the number of times each of the target classes (`true_classes`) and the sampled classes (`sampled_candidates`) is expected to occur in an average tensor of sampled classes. These values correspond to `Q(y|x)` defined in [this document](http://www.tensorflow.org/extras/candidate_sampling.pdf). If `unique=True`, then these are post-rejection probabilities and we compute them approximately. Args: true_classes: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. num_true: An `int`. The number of target classes per training example. num_sampled: An `int`. The number of classes to randomly sample. unique: A `bool`. Determines whether all sampled classes in a batch are unique. range_max: An `int`. The number of possible classes. seed: An `int`. An operation-specific seed. Default is 0. name: A name for the operation (optional). Returns: sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`. The sampled classes. true_expected_count: A tensor of type `float`. Same shape as `true_classes`. The expected counts under the sampling distribution of each of `true_classes`. sampled_expected_count: A tensor of type `float`. Same shape as `sampled_candidates`. The expected counts under the sampling distribution of each of `sampled_candidates`. """ seed1, seed2 = random_seed.get_seed(seed) return gen_candidate_sampling_ops._log_uniform_candidate_sampler( true_classes, num_true, num_sampled, unique, range_max, seed=seed1, seed2=seed2, name=name) def learned_unigram_candidate_sampler(true_classes, num_true, num_sampled, unique, range_max, seed=None, name=None): """Samples a set of classes from a distribution learned during training. This operation randomly samples a tensor of sampled classes (`sampled_candidates`) from the range of integers `[0, range_max)`. The elements of `sampled_candidates` are drawn without replacement (if `unique=True`) or with replacement (if `unique=False`) from the base distribution. The base distribution for this operation is constructed on the fly during training. It is a unigram distribution over the target classes seen so far during training. Every integer in `[0, range_max)` begins with a weight of 1, and is incremented by 1 each time it is seen as a target class. 
The base distribution is not saved to checkpoints, so it is reset when the model is reloaded. In addition, this operation returns tensors `true_expected_count` and `sampled_expected_count` representing the number of times each of the target classes (`true_classes`) and the sampled classes (`sampled_candidates`) is expected to occur in an average tensor of sampled classes. These values correspond to `Q(y|x)` defined in [this document](http://www.tensorflow.org/extras/candidate_sampling.pdf). If `unique=True`, then these are post-rejection probabilities and we compute them approximately. Args: true_classes: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. num_true: An `int`. The number of target classes per training example. num_sampled: An `int`. The number of classes to randomly sample. unique: A `bool`. Determines whether all sampled classes in a batch are unique. range_max: An `int`. The number of possible classes. seed: An `int`. An operation-specific seed. Default is 0. name: A name for the operation (optional). Returns: sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`. The sampled classes. true_expected_count: A tensor of type `float`. Same shape as `true_classes`. The expected counts under the sampling distribution of each of `true_classes`. sampled_expected_count: A tensor of type `float`. Same shape as `sampled_candidates`. The expected counts under the sampling distribution of each of `sampled_candidates`. """ seed1, seed2 = random_seed.get_seed(seed) return gen_candidate_sampling_ops._learned_unigram_candidate_sampler( true_classes, num_true, num_sampled, unique, range_max, seed=seed1, seed2=seed2, name=name) def fixed_unigram_candidate_sampler(true_classes, num_true, num_sampled, unique, range_max, vocab_file='', distortion=1.0, num_reserved_ids=0, num_shards=1, shard=0, unigrams=(), seed=None, name=None): """Samples a set of classes using the provided (fixed) base distribution. This operation randomly samples a tensor of sampled classes (`sampled_candidates`) from the range of integers `[0, range_max)`. The elements of `sampled_candidates` are drawn without replacement (if `unique=True`) or with replacement (if `unique=False`) from the base distribution. The base distribution is read from a file or passed in as an in-memory array. There is also an option to skew the distribution by applying a distortion power to the weights. In addition, this operation returns tensors `true_expected_count` and `sampled_expected_count` representing the number of times each of the target classes (`true_classes`) and the sampled classes (`sampled_candidates`) is expected to occur in an average tensor of sampled classes. These values correspond to `Q(y|x)` defined in [this document](http://www.tensorflow.org/extras/candidate_sampling.pdf). If `unique=True`, then these are post-rejection probabilities and we compute them approximately. Args: true_classes: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. num_true: An `int`. The number of target classes per training example. num_sampled: An `int`. The number of classes to randomly sample. unique: A `bool`. Determines whether all sampled classes in a batch are unique. range_max: An `int`. The number of possible classes. vocab_file: Each valid line in this file (which should have a CSV-like format) corresponds to a valid word ID. IDs are in sequential order, starting from num_reserved_ids. 
The last entry in each line is expected to be a value corresponding to the count or relative probability. Exactly one of `vocab_file` and `unigrams` needs to be passed to this operation. distortion: The distortion is used to skew the unigram probability distribution. Each weight is first raised to the distortion's power before adding to the internal unigram distribution. As a result, `distortion = 1.0` gives regular unigram sampling (as defined by the vocab file), and `distortion = 0.0` gives a uniform distribution. num_reserved_ids: Optionally some reserved IDs can be added in the range `[0, num_reserved_ids)` by the users. One use case is that a special unknown word token is used as ID 0. These IDs will have a sampling probability of 0. num_shards: A sampler can be used to sample from a subset of the original range in order to speed up the whole computation through parallelism. This parameter (together with `shard`) indicates the number of partitions that are being used in the overall computation. shard: A sampler can be used to sample from a subset of the original range in order to speed up the whole computation through parallelism. This parameter (together with `num_shards`) indicates the particular partition number of the operation, when partitioning is being used. unigrams: A list of unigram counts or probabilities, one per ID in sequential order. Exactly one of `vocab_file` and `unigrams` should be passed to this operation. seed: An `int`. An operation-specific seed. Default is 0. name: A name for the operation (optional). Returns: sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`. The sampled classes. true_expected_count: A tensor of type `float`. Same shape as `true_classes`. The expected counts under the sampling distribution of each of `true_classes`. sampled_expected_count: A tensor of type `float`. Same shape as `sampled_candidates`. The expected counts under the sampling distribution of each of `sampled_candidates`. """ seed1, seed2 = random_seed.get_seed(seed) return gen_candidate_sampling_ops._fixed_unigram_candidate_sampler( true_classes, num_true, num_sampled, unique, range_max, vocab_file=vocab_file, distortion=distortion, num_reserved_ids=num_reserved_ids, num_shards=num_shards, shard=shard, unigrams=unigrams, seed=seed1, seed2=seed2, name=name) def all_candidate_sampler(true_classes, num_true, num_sampled, unique, seed=None, name=None): """Generate the set of all classes. Deterministically generates and returns the set of all possible classes. For testing purposes. There is no need to use this, since you might as well use full softmax or full logistic regression. Args: true_classes: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. num_true: An `int`. The number of target classes per training example. num_sampled: An `int`. The number of possible classes. unique: A `bool`. Ignored. unique. seed: An `int`. An operation-specific seed. Default is 0. name: A name for the operation (optional). Returns: sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`. This operation deterministically returns the entire range `[0, num_sampled]`. true_expected_count: A tensor of type `float`. Same shape as `true_classes`. The expected counts under the sampling distribution of each of `true_classes`. All returned values are 1.0. sampled_expected_count: A tensor of type `float`. Same shape as `sampled_candidates`. The expected counts under the sampling distribution of each of `sampled_candidates`. 
All returned values are 1.0. """ seed1, seed2 = random_seed.get_seed(seed) return gen_candidate_sampling_ops._all_candidate_sampler( true_classes, num_true, num_sampled, unique, seed=seed1, seed2=seed2, name=name) def compute_accidental_hits(true_classes, sampled_candidates, num_true, seed=None, name=None): """Compute the position ids in `sampled_candidates` matching `true_classes`. In Candidate Sampling, this operation facilitates virtually removing sampled classes which happen to match target classes. This is done in Sampled Softmax and Sampled Logistic. See our [Candidate Sampling Algorithms Reference](http://www.tensorflow.org/extras/candidate_sampling.pdf). We presuppose that the `sampled_candidates` are unique. We call it an 'accidental hit' when one of the target classes matches one of the sampled classes. This operation reports accidental hits as triples `(index, id, weight)`, where `index` represents the row number in `true_classes`, `id` represents the position in `sampled_candidates`, and weight is `-FLOAT_MAX`. The result of this op should be passed through a `sparse_to_dense` operation, then added to the logits of the sampled classes. This removes the contradictory effect of accidentally sampling the true target classes as noise classes for the same example. Args: true_classes: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`. The sampled_candidates output of CandidateSampler. num_true: An `int`. The number of target classes per training example. seed: An `int`. An operation-specific seed. Default is 0. name: A name for the operation (optional). Returns: indices: A `Tensor` of type `int32` and shape `[num_accidental_hits]`. Values indicate rows in `true_classes`. ids: A `Tensor` of type `int64` and shape `[num_accidental_hits]`. Values indicate positions in `sampled_candidates`. weights: A `Tensor` of type `float` and shape `[num_accidental_hits]`. Each value is `-FLOAT_MAX`. """ seed1, seed2 = random_seed.get_seed(seed) return gen_candidate_sampling_ops._compute_accidental_hits( true_classes, sampled_candidates, num_true, seed=seed1, seed2=seed2, name=name)
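# A small pure-Python sanity check (illustration only; not part of the public
# API) of the log-uniform formula documented above:
def _log_uniform_normalization_check(range_max=1000):
    """Verify that the base distribution documented in
    `log_uniform_candidate_sampler` sums to one over `[0, range_max)`: the
    numerators `log(c + 2) - log(c + 1)` telescope to `log(range_max + 1)`,
    which the denominator then cancels.
    """
    import math
    total = sum((math.log(c + 2) - math.log(c + 1)) / math.log(range_max + 1)
                for c in range(range_max))
    assert abs(total - 1.0) < 1e-9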
apache-2.0
r3dshirt/Python-Koans
python 3/libs/colorama/win32.py
86
2730
# from winbase.h STDOUT = -11 STDERR = -12 try: from ctypes import windll except ImportError: windll = None SetConsoleTextAttribute = lambda *_: None else: from ctypes import ( byref, Structure, c_char, c_short, c_uint32, c_ushort ) handles = { STDOUT: windll.kernel32.GetStdHandle(STDOUT), STDERR: windll.kernel32.GetStdHandle(STDERR), } SHORT = c_short WORD = c_ushort DWORD = c_uint32 TCHAR = c_char class COORD(Structure): """struct in wincon.h""" _fields_ = [ ('X', SHORT), ('Y', SHORT), ] class SMALL_RECT(Structure): """struct in wincon.h.""" _fields_ = [ ("Left", SHORT), ("Top", SHORT), ("Right", SHORT), ("Bottom", SHORT), ] class CONSOLE_SCREEN_BUFFER_INFO(Structure): """struct in wincon.h.""" _fields_ = [ ("dwSize", COORD), ("dwCursorPosition", COORD), ("wAttributes", WORD), ("srWindow", SMALL_RECT), ("dwMaximumWindowSize", COORD), ] def GetConsoleScreenBufferInfo(stream_id): handle = handles[stream_id] csbi = CONSOLE_SCREEN_BUFFER_INFO() success = windll.kernel32.GetConsoleScreenBufferInfo( handle, byref(csbi)) # This fails when imported via setup.py when installing using 'pip' # presumably the fix is that running setup.py should not trigger all # this activity. # assert success return csbi def SetConsoleTextAttribute(stream_id, attrs): handle = handles[stream_id] success = windll.kernel32.SetConsoleTextAttribute(handle, attrs) assert success def SetConsoleCursorPosition(stream_id, position): handle = handles[stream_id] position = COORD(*position) success = windll.kernel32.SetConsoleCursorPosition(handle, position) assert success def FillConsoleOutputCharacter(stream_id, char, length, start): handle = handles[stream_id] char = TCHAR(char) length = DWORD(length) start = COORD(*start) num_written = DWORD(0) # AttributeError: function 'FillConsoleOutputCharacter' not found # could it just be that my types are wrong? success = windll.kernel32.FillConsoleOutputCharacter( handle, char, length, start, byref(num_written)) assert success return num_written.value if __name__=='__main__': x = GetConsoleScreenBufferInfo(STDOUT) print(x.dwSize) print(x.dwCursorPosition) print(x.wAttributes) print(x.srWindow) print(x.dwMaximumWindowSize)
mit
Manojkumar91/odoo_inresto
addons/base_action_rule/__openerp__.py
7
1830
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Automated Action Rules',
    'version': '1.0',
    'category': 'Sales Management',
    'description': """
This module allows you to implement action rules for any object.
=================================================================

Use automated actions to automatically trigger actions for various screens.

**Example:** A lead created by a specific user may be automatically set to a
specific sales team, or an opportunity which still has status pending after
14 days might trigger an automatic reminder email.
    """,
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com',
    'depends': ['base', 'resource', 'mail'],
    'data': [
        'base_action_rule_data.xml',
        'base_action_rule_view.xml',
        'security/ir.model.access.csv',
    ],
    'demo': [],
    'installable': True,
    'auto_install': False,
}
agpl-3.0
vigneras/sequencer
tests/commons.py
2
2223
# -*- coding: utf-8 -*- ############################################################################### # Copyright (C) Bull S.A.S (2010, 2011) # Contributor: Pierre Vignéras <pierre.vigneras@gmail.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ############################################################################### import unittest import sys from sequencer.commons import get_header, GenericDB import re import logging _logger = logging.getLogger() _formatter = logging.Formatter('%(relativeCreated)s %(levelname)s %(funcName)s() - %(message)s') _handler = logging.StreamHandler(sys.stdout) _handler.setFormatter(_formatter) _logger.addHandler(_handler) _logger.setLevel(logging.DEBUG) class BaseTest(unittest.TestCase): """Base Test Class for all sequencer""" def setUp(self): _logger.debug(get_header(" Start %s " % self.id(), "*", 120)) def tearDown(self): # _logger.debug(get_header(" Stop %s " % self.id(), "*", 80)) pass class BaseGraph(BaseTest): def assertNoEdgeBetween(self, graph, a, b): self.assertFalse(graph.has_edge((a, b)) or graph.has_edge((b,a))) class SQLiteDB(GenericDB): def __init__(self, name, connection): GenericDB.__init__(self, name, connection, '?') self.connection.create_function("REGEXP", 2, self.regexp) @staticmethod def regexp(expr, item): reg = re.compile(expr) result = reg.search(item) is not None return result def sql_match_exp(self, column, re): return "%s REGEXP '%s'" % (column, re)
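# Illustration only: how the REGEXP function registered by SQLiteDB is invoked
# from SQL. Self-contained (in-memory database); the table name, rows and
# pattern below are made up for the demo.
def _demo_regexp_match():
    import sqlite3
    conn = sqlite3.connect(':memory:')
    # SQLite rewrites `x REGEXP y` as a call to REGEXP(y, x), i.e.
    # (pattern, value), which matches SQLiteDB.regexp's signature.
    conn.create_function("REGEXP", 2, SQLiteDB.regexp)
    conn.execute("CREATE TABLE nodes (name TEXT)")
    conn.executemany("INSERT INTO nodes VALUES (?)",
                     [('node12',), ('switch3',)])
    rows = conn.execute(
        "SELECT name FROM nodes WHERE name REGEXP 'node[0-9]+'").fetchall()
    assert rows == [('node12',)]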
gpl-3.0
firmlyjin/brython
www/tests/test_storage.py
17
1710
from browser.local_storage import storage # legacy import from browser.session_storage import storage as sess_storage # legacy import from browser.object_storage import ObjectStorage assert(storage.storage_type == "local_storage") assert(sess_storage.storage_type == "session_storage") storage.clear() sess_storage.clear() session_storage = sess_storage local_storage = storage session_storage['hi'] = "blah" assert(session_storage.get("hi") == "blah") session_storage['foo'] = "arg" assert(session_storage.pop('foo') == "arg") assert(sorted(session_storage.keys()) == ['hi']) assert(len(session_storage) == 1) del session_storage['hi'] assert(len(session_storage.keys()) == 0) try: local_storage.pop('hi') except KeyError: pass else: raise Exception("pop with no default on missing key did not raise key error") assert(local_storage.pop('hi', "passed again") == "passed again") local_storage['hi'] = '5' for key in local_storage: assert(local_storage[key] == '5') assert(local_storage.items() == [('hi', '5')]) object_storage = ObjectStorage(local_storage) object_storage.clear() object_storage['mah'] = {"hi": 5} assert(object_storage['mah'] == {'hi': 5}) object_storage[['hello', 'there']] = "gracias" assert(object_storage[['hello', 'there']] == "gracias") obj = object_storage.pop('mah') assert(obj['hi'] == 5) assert(obj == {"hi": 5}) assert(len(object_storage) == 1) for itm in object_storage: assert(itm == ['hello', 'there']) for k, v in object_storage.items(): assert(k == ['hello', 'there']) assert(v == "gracias") assert(object_storage.get('not here') == None) del object_storage[['hello', 'there']] assert(len(object_storage) == 0) print("passed all tests")
bsd-3-clause
cs-au-dk/Artemis
WebKit/Tools/Scripts/webkitpy/common/memoized_unittest.py
84
2595
# Copyright (c) 2010 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import unittest from webkitpy.common.memoized import memoized class _TestObject(object): def __init__(self): self.callCount = 0 @memoized def memoized_add(self, argument): """testing docstring""" self.callCount += 1 if argument is None: return None # Avoid the TypeError from None + 1 return argument + 1 class MemoizedTest(unittest.TestCase): def test_caching(self): test = _TestObject() test.callCount = 0 self.assertEqual(test.memoized_add(1), 2) self.assertEqual(test.callCount, 1) self.assertEqual(test.memoized_add(1), 2) self.assertEqual(test.callCount, 1) # Validate that callCount is working as expected. self.assertEqual(test.memoized_add(2), 3) self.assertEqual(test.callCount, 2) def test_tearoff(self): test = _TestObject() # Make sure that get()/tear-offs work: tearoff = test.memoized_add self.assertEqual(tearoff(4), 5) self.assertEqual(test.callCount, 1)
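# Illustration only: the memoized implementation under test is imported from
# webkitpy.common.memoized and not shown in this file. A minimal sketch of a
# cache-by-arguments decorator with the same observable behavior as the tests
# above might look like the following (hashable positional args only; the
# real version is a class that also supports tear-offs via the descriptor
# protocol):
def _memoized_sketch(function):
    cache = {}

    def wrapper(*args):
        # Compute once per distinct argument tuple, then serve from cache.
        if args not in cache:
            cache[args] = function(*args)
        return cache[args]
    return wrapper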
gpl-3.0
psci2195/espresso-ffans
testsuite/python/linear_momentum.py
5
1406
# Copyright (C) 2019 The ESPResSo project # # This file is part of ESPResSo. # # ESPResSo is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import unittest as ut import numpy as np import espressomd import unittest_decorators as utx N_PART = 10 VELOCITY = np.array([1.0, 2.0, 3.0]) MASS = 2.1 @utx.skipIfMissingFeatures("MASS") class LinearMomentumTest(ut.TestCase): system = None @classmethod def setUpClass(cls): cls.system = espressomd.System(box_l=[10.0] * 3) def test(self): self.system.part.add(pos=np.random.random((N_PART, 3)), v=np.ones( (N_PART, 3)) * VELOCITY, mass=np.ones((N_PART)) * MASS) linear_momentum = self.system.analysis.linear_momentum() np.testing.assert_allclose(linear_momentum, N_PART * MASS * VELOCITY) if __name__ == "__main__": ut.main()
gpl-3.0
jn7163/django
tests/template_tests/syntax_tests/test_exceptions.py
513
2099
from django.template import TemplateDoesNotExist, TemplateSyntaxError from django.test import SimpleTestCase from ..utils import setup from .test_extends import inheritance_templates class ExceptionsTests(SimpleTestCase): @setup({'exception01': "{% extends 'nonexistent' %}"}) def test_exception01(self): """ Raise exception for invalid template name """ with self.assertRaises(TemplateDoesNotExist): self.engine.render_to_string('exception01') @setup({'exception02': '{% extends nonexistent %}'}) def test_exception02(self): """ Raise exception for invalid variable template name """ if self.engine.string_if_invalid: with self.assertRaises(TemplateDoesNotExist): self.engine.render_to_string('exception02') else: with self.assertRaises(TemplateSyntaxError): self.engine.render_to_string('exception02') @setup( {'exception03': "{% extends 'inheritance01' %}" "{% block first %}2{% endblock %}{% extends 'inheritance16' %}"}, inheritance_templates, ) def test_exception03(self): """ Raise exception for extra {% extends %} tags """ with self.assertRaises(TemplateSyntaxError): self.engine.get_template('exception03') @setup( {'exception04': "{% extends 'inheritance17' %}{% block first %}{% echo 400 %}5678{% endblock %}"}, inheritance_templates, ) def test_exception04(self): """ Raise exception for custom tags used in child with {% load %} tag in parent, not in child """ with self.assertRaises(TemplateSyntaxError): self.engine.get_template('exception04') @setup({'exception05': '{% block first %}{{ block.super }}{% endblock %}'}) def test_exception05(self): """ Raise exception for block.super used in base template """ with self.assertRaises(TemplateSyntaxError): self.engine.render_to_string('exception05')
bsd-3-clause
wkennington/rethinkdb
external/v8_3.30.33.16/testing/gmock/gtest/scripts/common.py
1180
2919
# Copyright 2013 Google Inc. All Rights Reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Shared utilities for writing scripts for Google Test/Mock.""" __author__ = 'wan@google.com (Zhanyong Wan)' import os import re # Matches the line from 'svn info .' output that describes what SVN # path the current local directory corresponds to. For example, in # a googletest SVN workspace's trunk/test directory, the output will be: # # URL: https://googletest.googlecode.com/svn/trunk/test _SVN_INFO_URL_RE = re.compile(r'^URL: https://(\w+)\.googlecode\.com/svn(.*)') def GetCommandOutput(command): """Runs the shell command and returns its stdout as a list of lines.""" f = os.popen(command, 'r') lines = [line.strip() for line in f.readlines()] f.close() return lines def GetSvnInfo(): """Returns the project name and the current SVN workspace's root path.""" for line in GetCommandOutput('svn info .'): m = _SVN_INFO_URL_RE.match(line) if m: project = m.group(1) # googletest or googlemock rel_path = m.group(2) root = os.path.realpath(rel_path.count('/') * '../') return project, root return None, None def GetSvnTrunk(): """Returns the current SVN workspace's trunk root path.""" _, root = GetSvnInfo() return root + '/trunk' if root else None def IsInGTestSvn(): project, _ = GetSvnInfo() return project == 'googletest' def IsInGMockSvn(): project, _ = GetSvnInfo() return project == 'googlemock'
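if __name__ == '__main__':
    # Quick self-contained sanity check (illustration only) of
    # _SVN_INFO_URL_RE against the example URL quoted in the comment above;
    # no SVN workspace is required.
    m = _SVN_INFO_URL_RE.match(
        'URL: https://googletest.googlecode.com/svn/trunk/test')
    assert m and m.groups() == ('googletest', '/trunk/test')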
agpl-3.0
fccagou/pynotif
blink1/blink1_pyusb.py
1
4544
""" blink1_pyusb.py -- blink(1) Python library using PyUSB Uses "PyUSB 1.0" to do direct USB HID commands See: https://github.com/walac/pyusb Linux (Ubuntu/Debian): % sudo apt-get install pip % sudo pip install pyusb Note: will give "not claimed" error or similar. Try blink1.py instead Mac OS X: do "brew install libusb" on osx or "port install py26-pyusb-devel" on osx Windows: libusb-win32 (inf method) on windows? Based on blink1hid-demo.py by Aaron Blondeau 2013, Tod E. Kurt, http://thingm.com/ """ import sys import usb import time import string report_id = 0x01 debug_rw = False class Blink1: def __init__(self): self.dev = None return self.find() def find(self): self.dev = usb.core.find(idVendor=0x27b8, idProduct=0x01ed) if( self.dev == None ): return None #print "kernel_driver_active:%i" % (self.dev.is_kernel_driver_active(0)) if( self.dev.is_kernel_driver_active(0) ): try: self.dev.detach_kernel_driver(0) except usb.core.USBError as e: sys.exit("Could not detatch kernel driver: %s" % str(e)) #self.dev.set_configuration() def enumerate(self): return self.find() def open(self): self.close() return self.find() def close(self): if self.dev != None: self.dev = None # FIXME: what's equivalent to: close(self.dev) def notfound(self): return None # fixme what to do here def write(self,buf): """ Write command to blink(1) Send USB Feature Report 0x01 to blink(1) with 8-byte payload Note: arg 'buf' must be 8 bytes or bad things happen """ if debug_rw : print "blink1write:"+",".join('0x%02x' % v for v in buf) if( self.dev == None ): return self.notfound() bmRequestTypeOut = usb.util.build_request_type(usb.util.CTRL_OUT, usb.util.CTRL_TYPE_CLASS, usb.util.CTRL_RECIPIENT_INTERFACE) self.dev.ctrl_transfer( bmRequestTypeOut, 0x09, # == HID set_report (3 << 8) | report_id, # (3==HID feat.report) 0, buf) def read(self): """ Read command result from blink(1) Receive USB Feature Report 0x01 from blink(1) with 8-byte payload Note: buf must be 8 bytes or bad things happen """ bmRequestTypeIn = usb.util.build_request_type(usb.util.CTRL_IN, usb.util.CTRL_TYPE_CLASS, usb.util.CTRL_RECIPIENT_INTERFACE) buf = self.dev.ctrl_transfer( bmRequestTypeIn, 0x01, # == HID get_report (3 << 8) | report_id, 0, 8 ) # == number of bytes to read if debug_rw : print "blink1read: "+",".join('0x%02x' % v for v in buf) return buf def fade_to_rgbn(self, fadeMillis, red,green,blue, ledn): """ Command blink(1) to fade to RGB color """ action = ord('c') fadeMillis = fadeMillis/10 th = (fadeMillis & 0xff00) >> 8 tl = fadeMillis & 0x00ff buf = [report_id, action, red,green,blue, th,tl, ledn] return self.write(buf) def fade_to_rgb(self, fadeMillis, red,green,blue): """ Command blink(1) to fade to RGB color """ return self.fade_to_rgbn(fadeMillis, red,green,blue,0) def playloop(self, play,startpos,endpos,count): """ """ buf = [0x01, ord('p'), play, startpos, endpos, count, 0,0 ] return self.write(buf) def play(self, play,startpos): """ """ return self.playloop( play, startpos, 0,0) def get_version(self): """ Get blink(1) firmware version """ if( self.dev == None ): return '' buf = [0x01, ord('v'), 0,0, 0,0,0,0] self.write(buf) time.sleep(.05) version_raw = self.read() version = (version_raw[3]-ord('0'))*100 + (version_raw[4]-ord('0')) return str(version) def get_serialnumber(self): """ Get blink(1) serial number """ if( self.dev == None ): return '' return usb.util.get_string(self.dev, 256, 3) def get_serialnumbers(self): # FIXME: seriallist = [] seriallist.append( self.get_serialnumber() ) return seriallist
gpl-2.0
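A hedged usage sketch for the Blink1 class above (Python 2, to match the module; assumes a blink(1) is plugged in and the process is allowed to claim the USB interface):

import time
from blink1_pyusb import Blink1  # the module above

blink1 = Blink1()
if blink1.dev is None:
    print('no blink(1) found')
else:
    print('firmware version: ' + blink1.get_version())
    blink1.fade_to_rgb(500, 255, 0, 0)  # fade to red over 500 ms
    time.sleep(1)
    blink1.fade_to_rgb(500, 0, 0, 0)    # fade back to off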
lowitty/server
libsLinux/zope/interface/verify.py
53
4727
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Verify interface implementations
"""
from zope.interface.exceptions import BrokenImplementation, DoesNotImplement
from zope.interface.exceptions import BrokenMethodImplementation
from types import FunctionType, MethodType
from zope.interface.interface import fromMethod, fromFunction, Method
import sys

# This will be monkey-patched when running under Zope 2, so leave this
# here:
MethodTypes = (MethodType, )


def _verify(iface, candidate, tentative=0, vtype=None):
    """Verify that 'candidate' might correctly implement 'iface'.

    This involves:

      o Making sure the candidate defines all the necessary methods

      o Making sure the methods have the correct signature

      o Making sure the candidate asserts that it implements the interface

    Note that this isn't the same as verifying that the class does
    implement the interface.

    If optional tentative is true, suppress the "is implemented by" test.
    """

    if vtype == 'c':
        tester = iface.implementedBy
    else:
        tester = iface.providedBy

    if not tentative and not tester(candidate):
        raise DoesNotImplement(iface)

    # Here the `desc` is either an `Attribute` or `Method` instance
    for name, desc in iface.namesAndDescriptions(1):
        try:
            attr = getattr(candidate, name)
        except AttributeError:
            if (not isinstance(desc, Method)) and vtype == 'c':
                # We can't verify non-methods on classes, since the
                # class may provide attrs in its __init__.
                continue
            raise BrokenImplementation(iface, name)

        if not isinstance(desc, Method):
            # If it's not a method, there's nothing else we can test
            continue

        if isinstance(attr, FunctionType):
            if sys.version[0] == '3' and isinstance(candidate, type):
                # This is an "unbound method" in Python 3.
                meth = fromFunction(attr, iface, name=name, imlevel=1)  #pragma NO COVERAGE
            else:
                # Nope, just a normal function
                meth = fromFunction(attr, iface, name=name)
        elif (isinstance(attr, MethodTypes)
              and type(attr.__func__) is FunctionType):
            meth = fromMethod(attr, iface, name)
        elif isinstance(attr, property) and vtype == 'c':
            # Without an instance we cannot be sure it's not a
            # callable.
            continue
        else:
            if not callable(attr):
                raise BrokenMethodImplementation(name, "Not a method")
            # sigh, it's callable, but we don't know how to introspect it, so
            # we have to give it a pass.
            continue  #pragma NO COVERAGE

        # Make sure that the required and implemented method signatures are
        # the same.
        desc = desc.getSignatureInfo()
        meth = meth.getSignatureInfo()

        mess = _incompat(desc, meth)
        if mess:
            raise BrokenMethodImplementation(name, mess)

    return True


def verifyClass(iface, candidate, tentative=0):
    return _verify(iface, candidate, tentative, vtype='c')


def verifyObject(iface, candidate, tentative=0):
    return _verify(iface, candidate, tentative, vtype='o')


def _incompat(required, implemented):
    #if (required['positional'] !=
    #    implemented['positional'][:len(required['positional'])]
    #    and implemented['kwargs'] is None):
    #    return 'implementation has different argument names'
    if len(implemented['required']) > len(required['required']):
        return 'implementation requires too many arguments'
    if ((len(implemented['positional']) < len(required['positional']))
        and not implemented['varargs']):
        return "implementation doesn't allow enough arguments"
    if required['kwargs'] and not implemented['kwargs']:
        return "implementation doesn't support keyword arguments"
    if required['varargs'] and not implemented['varargs']:
        return "implementation doesn't support variable arguments"
mit
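A short sketch of the two public entry points; the interface and classes here are invented for illustration:

from zope.interface import Interface, implementer
from zope.interface.verify import verifyClass, verifyObject


class IGreeter(Interface):
    def greet(name):
        "Return a greeting for name."


@implementer(IGreeter)
class Greeter(object):
    def greet(self, name):
        return 'hello, %s' % name


print(verifyClass(IGreeter, Greeter))     # True
print(verifyObject(IGreeter, Greeter()))  # True


@implementer(IGreeter)
class BadGreeter(object):
    def greet(self, name, extra):  # one required argument too many
        return name

# verifyClass(IGreeter, BadGreeter) would raise
# BrokenMethodImplementation: 'implementation requires too many arguments'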
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
1
22538
"""API for communicating with twitch""" from __future__ import absolute_import import functools import logging import os import threading import m3u8 import oauthlib.oauth2 import requests import requests.utils import requests_oauthlib from pytwitcherapi.chat import client from . import constants, exceptions, models, oauth __all__ = ['needs_auth', 'TwitchSession'] log = logging.getLogger(__name__) TWITCH_KRAKENURL = 'https://api.twitch.tv/kraken/' """The baseurl for the twitch api""" TWITCH_HEADER_ACCEPT = 'application/vnd.twitchtv.v3+json' """The header for the ``Accept`` key to tell twitch which api version it should use""" TWITCH_USHERURL = 'http://usher.twitch.tv/api/' """The baseurl for the twitch usher api""" TWITCH_APIURL = 'http://api.twitch.tv/api/' """The baseurl for the old twitch api""" TWITCH_STATUSURL = 'http://twitchstatus.com/api/status?type=chat' AUTHORIZATION_BASE_URL = 'https://api.twitch.tv/kraken/oauth2/authorize' """Authorisation Endpoint""" CLIENT_ID = os.environ.get("PYTWITCHER_CLIENT_ID") or '642a2vtmqfumca8hmfcpkosxlkmqifb' """The client id of pytwitcher on twitch. Use environment variable ``PYTWITCHER_CLIENT_ID`` or pytwitcher default value. """ SCOPES = ['user_read', 'chat_login'] """The scopes that PyTwitcher needs""" def needs_auth(meth): """Wraps a method of :class:`TwitchSession` and raises an :class:`exceptions.NotAuthorizedError` if before calling the method, the session isn't authorized. :param meth: :type meth: :returns: the wrapped method :rtype: Method :raises: None """ @functools.wraps(meth) def wrapped(*args, **kwargs): if not args[0].authorized: raise exceptions.NotAuthorizedError('Please login first!') return meth(*args, **kwargs) return wrapped class OAuthSession(requests_oauthlib.OAuth2Session): """Session with oauth2 support. You can still use http requests. """ def __init__(self): """Initialize a new oauth session :raises: None """ client = oauth.TwitchOAuthClient(client_id=CLIENT_ID) super(OAuthSession, self).__init__(client_id=CLIENT_ID, client=client, scope=SCOPES, redirect_uri=constants.REDIRECT_URI) self.login_server = None """The server that handles the login redirect""" self.login_thread = None """The thread that serves the login server""" def request(self, method, url, **kwargs): """Constructs a :class:`requests.Request`, prepares it and sends it. Raises HTTPErrors by default. :param method: method for the new :class:`Request` object. :type method: :class:`str` :param url: URL for the new :class:`Request` object. :type url: :class:`str` :param kwargs: keyword arguments of :meth:`requests.Session.request` :returns: a resonse object :rtype: :class:`requests.Response` :raises: :class:`requests.HTTPError` """ if oauthlib.oauth2.is_secure_transport(url): m = super(OAuthSession, self).request else: m = super(requests_oauthlib.OAuth2Session, self).request log.debug("%s \"%s\" with %s", method, url, kwargs) response = m(method, url, **kwargs) response.raise_for_status() return response def start_login_server(self, ): """Start a server that will get a request from a user logging in. This uses the Implicit Grant Flow of OAuth2. The user is asked to login to twitch and grant PyTwitcher authorization. Once the user agrees, he is redirected to an url. This server will respond to that url and get the oauth token. The server serves in another thread. To shut him down, call :meth:`TwitchSession.shutdown_login_server`. This sets the :data:`TwitchSession.login_server`, :data:`TwitchSession.login_thread` variables. 
:returns: The created server :rtype: :class:`BaseHTTPServer.HTTPServer` :raises: None """ self.login_server = oauth.LoginServer(session=self) target = self.login_server.serve_forever self.login_thread = threading.Thread(target=target) self.login_thread.setDaemon(True) log.debug('Starting login server thread.') self.login_thread.start() def shutdown_login_server(self, ): """Shutdown the login server and thread :returns: None :rtype: None :raises: None """ log.debug('Shutting down the login server thread.') self.login_server.shutdown() self.login_server.server_close() self.login_thread.join() def get_auth_url(self, ): """Return the url for the user to authorize PyTwitcher :returns: The url the user should visit to authorize PyTwitcher :rtype: :class:`str` :raises: None """ return self.authorization_url(AUTHORIZATION_BASE_URL)[0] class TwitchSession(OAuthSession): """Session for making requests to the twitch api Use :meth:`TwitchSession.kraken_request`, :meth:`TwitchSession.usher_request`, :meth:`TwitchSession.oldapi_request` to make easier calls to the api directly. To get authorization, the user has to grant PyTwitcher access. The workflow goes like this: 1. Start the login server with :meth:`TwitchSession.start_login_server`. 2. User should visit :meth:`TwitchSession.get_auth_url` in his browser and follow insturctions (e.g Login and Allow PyTwitcher). 3. Check if the session is authorized with :meth:`TwitchSession.authorized`. 4. Shut the login server down with :meth:`TwitchSession.shutdown_login_server`. Now you can use methods that need authorization. """ def __init__(self): """Initialize a new TwitchSession :raises: None """ super(TwitchSession, self).__init__() self.baseurl = '' """The baseurl that gets prepended to every request url""" self.current_user = None """The currently logined user.""" self._token = None """The oauth token""" @property def token(self, ): """Return the oauth token :returns: the token :rtype: :class:`dict` :raises: None """ return self._token @token.setter def token(self, token): """Set the oauth token and the current_user :param token: the oauth token :type token: :class:`dict` :returns: None :rtype: None :raises: None """ self._token = token if token: self.current_user = self.query_login_user() def kraken_request(self, method, endpoint, **kwargs): """Make a request to one of the kraken api endpoints. Headers are automatically set to accept :data:`TWITCH_HEADER_ACCEPT`. Also the client id from :data:`CLIENT_ID` will be set. The url will be constructed of :data:`TWITCH_KRAKENURL` and the given endpoint. :param method: the request method :type method: :class:`str` :param endpoint: the endpoint of the kraken api. The base url is automatically provided. :type endpoint: :class:`str` :param kwargs: keyword arguments of :meth:`requests.Session.request` :returns: a resonse object :rtype: :class:`requests.Response` :raises: :class:`requests.HTTPError` """ url = TWITCH_KRAKENURL + endpoint headers = kwargs.setdefault('headers', {}) headers['Accept'] = TWITCH_HEADER_ACCEPT headers['Client-ID'] = CLIENT_ID # https://github.com/justintv/Twitch-API#rate-limits return self.request(method, url, **kwargs) def usher_request(self, method, endpoint, **kwargs): """Make a request to one of the usher api endpoints. The url will be constructed of :data:`TWITCH_USHERURL` and the given endpoint. :param method: the request method :type method: :class:`str` :param endpoint: the endpoint of the usher api. The base url is automatically provided. 
:type endpoint: :class:`str` :param kwargs: keyword arguments of :meth:`requests.Session.request` :returns: a resonse object :rtype: :class:`requests.Response` :raises: :class:`requests.HTTPError` """ url = TWITCH_USHERURL + endpoint return self.request(method, url, **kwargs) def oldapi_request(self, method, endpoint, **kwargs): """Make a request to one of the old api endpoints. The url will be constructed of :data:`TWITCH_APIURL` and the given endpoint. :param method: the request method :type method: :class:`str` :param endpoint: the endpoint of the old api. The base url is automatically provided. :type endpoint: :class:`str` :param kwargs: keyword arguments of :meth:`requests.Session.request` :returns: a resonse object :rtype: :class:`requests.Response` :raises: :class:`requests.HTTPError` """ headers = kwargs.setdefault('headers', {}) headers['Client-ID'] = CLIENT_ID # https://github.com/justintv/Twitch-API#rate-limits url = TWITCH_APIURL + endpoint return self.request(method, url, **kwargs) def fetch_viewers(self, game): """Query the viewers and channels of the given game and set them on the object :returns: the given game :rtype: :class:`models.Game` :raises: None """ r = self.kraken_request('GET', 'streams/summary', params={'game': game.name}).json() game.viewers = r['viewers'] game.channels = r['channels'] return game def search_games(self, query, live=True): """Search for games that are similar to the query :param query: the query string :type query: :class:`str` :param live: If true, only returns games that are live on at least one channel :type live: :class:`bool` :returns: A list of games :rtype: :class:`list` of :class:`models.Game` instances :raises: None """ r = self.kraken_request('GET', 'search/games', params={'query': query, 'type': 'suggest', 'live': live}) games = models.Game.wrap_search(r) for g in games: self.fetch_viewers(g) return games def top_games(self, limit=10, offset=0): """Return the current top games :param limit: the maximum amount of top games to query :type limit: :class:`int` :param offset: the offset in the top games :type offset: :class:`int` :returns: a list of top games :rtype: :class:`list` of :class:`models.Game` :raises: None """ r = self.kraken_request('GET', 'games/top', params={'limit': limit, 'offset': offset}) return models.Game.wrap_topgames(r) def get_game(self, name): """Get the game instance for a game name :param name: the name of the game :type name: :class:`str` :returns: the game instance :rtype: :class:`models.Game` | None :raises: None """ games = self.search_games(query=name, live=False) for g in games: if g.name == name: return g def get_channel(self, name): """Return the channel for the given name :param name: the channel name :type name: :class:`str` :returns: the model instance :rtype: :class:`models.Channel` :raises: None """ r = self.kraken_request('GET', 'channels/' + name) return models.Channel.wrap_get_channel(r) def search_channels(self, query, limit=25, offset=0): """Search for channels and return them :param query: the query string :type query: :class:`str` :param limit: maximum number of results :type limit: :class:`int` :param offset: offset for pagination :type offset: :class:`int` :returns: A list of channels :rtype: :class:`list` of :class:`models.Channel` instances :raises: None """ r = self.kraken_request('GET', 'search/channels', params={'query': query, 'limit': limit, 'offset': offset}) return models.Channel.wrap_search(r) def get_stream(self, channel): """Return the stream of the given channel :param channel: 
the channel that is broadcasting. Either name or models.Channel instance :type channel: :class:`str` | :class:`models.Channel` :returns: the stream or None, if the channel is offline :rtype: :class:`models.Stream` | None :raises: None """ if isinstance(channel, models.Channel): channel = channel.name r = self.kraken_request('GET', 'streams/' + channel) return models.Stream.wrap_get_stream(r) def get_streams(self, game=None, channels=None, limit=25, offset=0): """Return a list of streams queried by a number of parameters sorted by number of viewers descending :param game: the game or name of the game :type game: :class:`str` | :class:`models.Game` :param channels: list of models.Channels or channel names (can be mixed) :type channels: :class:`list` of :class:`models.Channel` or :class:`str` :param limit: maximum number of results :type limit: :class:`int` :param offset: offset for pagination :type offset: :class:`int` :returns: A list of streams :rtype: :class:`list` of :class:`models.Stream` :raises: None """ if isinstance(game, models.Game): game = game.name channelnames = [] cparam = None if channels: for c in channels: if isinstance(c, models.Channel): c = c.name channelnames.append(c) cparam = ','.join(channelnames) params = {'limit': limit, 'offset': offset, 'game': game, 'channel': cparam} r = self.kraken_request('GET', 'streams', params=params) return models.Stream.wrap_search(r) def search_streams(self, query, hls=False, limit=25, offset=0): """Search for streams and return them :param query: the query string :type query: :class:`str` :param hls: If true, only return streams that have hls stream :type hls: :class:`bool` :param limit: maximum number of results :type limit: :class:`int` :param offset: offset for pagination :type offset: :class:`int` :returns: A list of streams :rtype: :class:`list` of :class:`models.Stream` instances :raises: None """ r = self.kraken_request('GET', 'search/streams', params={'query': query, 'hls': hls, 'limit': limit, 'offset': offset}) return models.Stream.wrap_search(r) @needs_auth def followed_streams(self, limit=25, offset=0): """Return the streams the current user follows. Needs authorization ``user_read``. :param limit: maximum number of results :type limit: :class:`int` :param offset: offset for pagination :type offset: :class:`int` :returns: A list of streams :rtype: :class:`list`of :class:`models.Stream` instances :raises: :class:`exceptions.NotAuthorizedError` """ r = self.kraken_request('GET', 'streams/followed', params={'limit': limit, 'offset': offset}) return models.Stream.wrap_search(r) def get_user(self, name): """Get the user for the given name :param name: The username :type name: :class:`str` :returns: the user instance :rtype: :class:`models.User` :raises: None """ r = self.kraken_request('GET', 'user/' + name) return models.User.wrap_get_user(r) @needs_auth def query_login_user(self, ): """Query and return the currently logined user :returns: The user instance :rtype: :class:`models.User` :raises: :class:`exceptions.NotAuthorizedError` """ r = self.kraken_request('GET', 'user') return models.User.wrap_get_user(r) def get_playlist(self, channel): """Return the playlist for the given channel :param channel: the channel :type channel: :class:`models.Channel` | :class:`str` :returns: the playlist :rtype: :class:`m3u8.M3U8` :raises: :class:`requests.HTTPError` if channel is offline. 
""" if isinstance(channel, models.Channel): channel = channel.name token, sig = self.get_channel_access_token(channel) params = {'token': token, 'sig': sig, 'allow_audio_only': True, 'allow_source': True} r = self.usher_request( 'GET', 'channel/hls/%s.m3u8' % channel, params=params) playlist = m3u8.loads(r.text) return playlist def get_quality_options(self, channel): """Get the available quality options for streams of the given channel Possible values in the list: * source * high * medium * low * mobile * audio :param channel: the channel or channel name :type channel: :class:`models.Channel` | :class:`str` :returns: list of quality options :rtype: :class:`list` of :class:`str` :raises: :class:`requests.HTTPError` if channel is offline. """ optionmap = {'chunked': 'source', 'high': 'high', 'medium': 'medium', 'low': 'low', 'mobile': 'mobile', 'audio_only': 'audio'} p = self.get_playlist(channel) options = [] for pl in p.playlists: q = pl.media[0].group_id options.append(optionmap[q]) return options def get_channel_access_token(self, channel): """Return the token and sig for the given channel :param channel: the channel or channel name to get the access token for :type channel: :class:`channel` | :class:`str` :returns: The token and sig for the given channel :rtype: (:class:`unicode`, :class:`unicode`) :raises: None """ if isinstance(channel, models.Channel): channel = channel.name r = self.oldapi_request( 'GET', 'channels/%s/access_token' % channel).json() return r['token'], r['sig'] def get_chat_server(self, channel): """Get an appropriate chat server for the given channel Usually the server is irc.twitch.tv. But because of the delicate twitch chat, they use a lot of servers. Big events are on special event servers. This method tries to find a good one. :param channel: the channel with the chat :type channel: :class:`models.Channel` :returns: the server address and port :rtype: (:class:`str`, :class:`int`) :raises: None """ r = self.oldapi_request( 'GET', 'channels/%s/chat_properties' % channel.name) json = r.json() servers = json['chat_servers'] try: r = self.get(TWITCH_STATUSURL) except requests.HTTPError: log.debug('Error getting chat server status. Using random one.') address = servers[0] else: stats = [client.ChatServerStatus(**d) for d in r.json()] address = self._find_best_chat_server(servers, stats) server, port = address.split(':') return server, int(port) @staticmethod def _find_best_chat_server(servers, stats): """Find the best from servers by comparing with the stats :param servers: a list if server adresses, e.g. ['0.0.0.0:80'] :type servers: :class:`list` of :class:`str` :param stats: list of server statuses :type stats: :class:`list` of :class:`chat.ChatServerStatus` :returns: the best server adress :rtype: :class:`str` :raises: None """ best = servers[0] # In case we sind no match with any status stats.sort() # gets sorted for performance for stat in stats: for server in servers: if server == stat: # found a chatserver that has the same address # than one of the chatserverstats. # since the stats are sorted for performance # the first hit is the best, thus break best = server break if best: # already found one, so no need to check the other # statuses, which are worse break return best def get_emote_picture(self, emote, size=1.0): """Return the picture for the given emote :param emote: the emote object :type emote: :class:`pytwitcherapi.chat.message.Emote` :param size: the size of the picture. 
Choices are: 1.0, 2.0, 3.0 :type size: :class:`float` :returns: A string resembling the picturedata of the emote :rtype: :class:`str` :raises: None """ r = self.get('http://static-cdn.jtvnw.net/emoticons/v1/%s/%s' % (emote.emoteid, size)) return r.content
bsd-3-clause
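The four-step authorization workflow from the TwitchSession docstring, as a hedged sketch (assumes pytwitcherapi is installed; the busy-wait on session.authorized is an assumption about how a caller might block until the redirect arrives):

import time
import webbrowser

from pytwitcherapi.session import TwitchSession

session = TwitchSession()
session.start_login_server()                 # 1. serve the redirect endpoint
webbrowser.open(session.get_auth_url())      # 2. user logs in and grants access
while not session.authorized:                # 3. wait for the token to land
    time.sleep(1)
session.shutdown_login_server()              # 4. tear the server down

for stream in session.followed_streams(limit=5):
    print(stream)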
kevingu1003/python-pptx
tests/unitutil/file.py
5
1816
# encoding: utf-8

"""
Utility functions for loading files for unit testing
"""

import os
import sys

from lxml import etree

from pptx.oxml import oxml_parser


_thisdir = os.path.split(__file__)[0]
test_file_dir = os.path.abspath(os.path.join(_thisdir, '..', 'test_files'))


def abspath(relpath):
    thisdir = os.path.split(__file__)[0]
    return os.path.abspath(os.path.join(thisdir, relpath))


def absjoin(*paths):
    return os.path.abspath(os.path.join(*paths))


def docx_path(name):
    """
    Return the absolute path to test .docx file with root name *name*.
    """
    return absjoin(test_file_dir, '%s.docx' % name)


def parse_xml_file(file_):
    """
    Return ElementTree for XML contained in *file_*
    """
    return etree.parse(file_, oxml_parser)


def snippet_seq(name, offset=0, count=sys.maxsize):
    """
    Return a tuple containing the unicode text snippets read from the
    snippet file having *name*. Snippets are delimited by a blank line.
    If specified, *count* snippets starting at *offset* are returned.
    """
    path = os.path.join(test_file_dir, 'snippets', '%s.txt' % name)
    with open(path, 'rb') as f:
        text = f.read().decode('utf-8')
    snippets = text.split('\n\n')
    start, end = offset, offset + count
    return tuple(snippets[start:end])


def snippet_text(snippet_file_name):
    """
    Return the unicode text read from the test snippet file having
    *snippet_file_name*.
    """
    snippet_file_path = os.path.join(
        test_file_dir, 'snippets', '%s.txt' % snippet_file_name
    )
    with open(snippet_file_path, 'rb') as f:
        snippet_bytes = f.read()
    return snippet_bytes.decode('utf-8')


def testfile(name):
    """
    Return the absolute path to test file having *name*.
    """
    return absjoin(test_file_dir, name)
mit
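A sketch of the offset/count slicing that snippet_seq performs; the snippet file is read from disk in the real helper, so the slicing is shown here on an inline string instead:

# Three snippets separated by blank lines, as a snippets .txt file would hold.
text = 'one\n\ntwo\n\nthree'
snippets = text.split('\n\n')

offset, count = 1, 1
print(tuple(snippets[offset:offset + count]))  # ('two',)   like snippet_seq(name, 1, 1)
print(tuple(snippets[0:0 + len(snippets)]))    # all three  like snippet_seq(name)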
miguelfervi/SSBW-Restaurantes
restaurantes/lib/python2.7/site-packages/django/contrib/gis/gdal/raster/source.py
297
13274
import json
import os
from ctypes import addressof, byref, c_double, c_void_p

from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.driver import Driver
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.prototypes import raster as capi
from django.contrib.gis.gdal.raster.band import BandList
from django.contrib.gis.gdal.raster.const import GDAL_RESAMPLE_ALGORITHMS
from django.contrib.gis.gdal.srs import SpatialReference, SRSException
from django.contrib.gis.geometry.regex import json_regex
from django.utils import six
from django.utils.encoding import (
    force_bytes, force_text, python_2_unicode_compatible,
)
from django.utils.functional import cached_property


class TransformPoint(list):
    indices = {
        'origin': (0, 3),
        'scale': (1, 5),
        'skew': (2, 4),
    }

    def __init__(self, raster, prop):
        x = raster.geotransform[self.indices[prop][0]]
        y = raster.geotransform[self.indices[prop][1]]
        list.__init__(self, [x, y])
        self._raster = raster
        self._prop = prop

    @property
    def x(self):
        return self[0]

    @x.setter
    def x(self, value):
        gtf = self._raster.geotransform
        gtf[self.indices[self._prop][0]] = value
        self._raster.geotransform = gtf

    @property
    def y(self):
        return self[1]

    @y.setter
    def y(self, value):
        gtf = self._raster.geotransform
        gtf[self.indices[self._prop][1]] = value
        self._raster.geotransform = gtf


@python_2_unicode_compatible
class GDALRaster(GDALBase):
    """
    Wraps a raster GDAL Data Source object.
    """
    def __init__(self, ds_input, write=False):
        self._write = 1 if write else 0
        Driver.ensure_registered()

        # Preprocess json inputs. This converts json strings to dictionaries,
        # which are parsed below the same way as direct dictionary inputs.
        if isinstance(ds_input, six.string_types) and json_regex.match(ds_input):
            ds_input = json.loads(ds_input)

        # If input is a valid file path, try setting file as source.
        if isinstance(ds_input, six.string_types):
            if not os.path.exists(ds_input):
                raise GDALException('Unable to read raster source input "{}"'.format(ds_input))
            try:
                # GDALOpen will auto-detect the data source type.
                self._ptr = capi.open_ds(force_bytes(ds_input), self._write)
            except GDALException as err:
                raise GDALException('Could not open the datasource at "{}" ({}).'.format(ds_input, err))
        elif isinstance(ds_input, dict):
            # A new raster needs to be created in write mode
            self._write = 1

            # Create driver (in memory by default)
            driver = Driver(ds_input.get('driver', 'MEM'))

            # For out of memory drivers, check filename argument
            if driver.name != 'MEM' and 'name' not in ds_input:
                raise GDALException('Specify name for creation of raster with driver "{}".'.format(driver.name))

            # Check if width and height were specified
            if 'width' not in ds_input or 'height' not in ds_input:
                raise GDALException('Specify width and height attributes for JSON or dict input.')

            # Check if srid was specified
            if 'srid' not in ds_input:
                raise GDALException('Specify srid for JSON or dict input.')

            # Create GDAL Raster
            self._ptr = capi.create_ds(
                driver._ptr,
                force_bytes(ds_input.get('name', '')),
                ds_input['width'],
                ds_input['height'],
                ds_input.get('nr_of_bands', len(ds_input.get('bands', []))),
                ds_input.get('datatype', 6),
                None
            )

            # Set band data if provided
            for i, band_input in enumerate(ds_input.get('bands', [])):
                band = self.bands[i]
                band.data(band_input['data'])
                if 'nodata_value' in band_input:
                    band.nodata_value = band_input['nodata_value']

            # Set SRID
            self.srs = ds_input.get('srid')

            # Set additional properties if provided
            if 'origin' in ds_input:
                self.origin.x, self.origin.y = ds_input['origin']

            if 'scale' in ds_input:
                self.scale.x, self.scale.y = ds_input['scale']

            if 'skew' in ds_input:
                self.skew.x, self.skew.y = ds_input['skew']
        elif isinstance(ds_input, c_void_p):
            # Instantiate the object using an existing pointer to a gdal raster.
            self._ptr = ds_input
        else:
            raise GDALException('Invalid data source input type: "{}".'.format(type(ds_input)))

    def __del__(self):
        if self._ptr and capi:
            capi.close_ds(self._ptr)

    def __str__(self):
        return self.name

    def __repr__(self):
        """
        Short-hand representation because WKB may be very large.
        """
        return '<Raster object at %s>' % hex(addressof(self._ptr))

    def _flush(self):
        """
        Flush all data from memory into the source file if it exists.
        The data that needs flushing are geotransforms, coordinate systems,
        nodata_values and pixel values. This function will be called
        automatically wherever it is needed.
        """
        # Raise an Exception if the value is being changed in read mode.
        if not self._write:
            raise GDALException('Raster needs to be opened in write mode to change values.')
        capi.flush_ds(self._ptr)

    @property
    def name(self):
        """
        Returns the name of this raster. Corresponds to filename
        for file-based rasters.
        """
        return force_text(capi.get_ds_description(self._ptr))

    @cached_property
    def driver(self):
        """
        Returns the GDAL Driver used for this raster.
        """
        ds_driver = capi.get_ds_driver(self._ptr)
        return Driver(ds_driver)

    @property
    def width(self):
        """
        Width (X axis) in pixels.
        """
        return capi.get_ds_xsize(self._ptr)

    @property
    def height(self):
        """
        Height (Y axis) in pixels.
        """
        return capi.get_ds_ysize(self._ptr)

    @property
    def srs(self):
        """
        Returns the SpatialReference used in this GDALRaster.
        """
        try:
            wkt = capi.get_ds_projection_ref(self._ptr)
            if not wkt:
                return None
            return SpatialReference(wkt, srs_type='wkt')
        except SRSException:
            return None

    @srs.setter
    def srs(self, value):
        """
        Sets the spatial reference used in this GDALRaster. The input can be
        a SpatialReference or any parameter accepted by the SpatialReference
        constructor.
        """
        if isinstance(value, SpatialReference):
            srs = value
        elif isinstance(value, six.integer_types + six.string_types):
            srs = SpatialReference(value)
        else:
            raise ValueError('Could not create a SpatialReference from input.')
        capi.set_ds_projection_ref(self._ptr, srs.wkt.encode())
        self._flush()

    @property
    def geotransform(self):
        """
        Returns the geotransform of the data source.
        Returns the default geotransform if it does not exist or has not
        been set previously. The default is [0.0, 1.0, 0.0, 0.0, 0.0, -1.0].
        """
        # Create empty ctypes double array for data
        gtf = (c_double * 6)()
        capi.get_ds_geotransform(self._ptr, byref(gtf))
        return list(gtf)

    @geotransform.setter
    def geotransform(self, values):
        "Sets the geotransform for the data source."
        if sum([isinstance(x, (int, float)) for x in values]) != 6:
            raise ValueError('Geotransform must consist of 6 numeric values.')
        # Create ctypes double array with input and write data
        values = (c_double * 6)(*values)
        capi.set_ds_geotransform(self._ptr, byref(values))
        self._flush()

    @property
    def origin(self):
        """
        Coordinates of the raster origin.
        """
        return TransformPoint(self, 'origin')

    @property
    def scale(self):
        """
        Pixel scale in units of the raster projection.
        """
        return TransformPoint(self, 'scale')

    @property
    def skew(self):
        """
        Skew of pixels (rotation parameters).
        """
        return TransformPoint(self, 'skew')

    @property
    def extent(self):
        """
        Returns the extent as a 4-tuple (xmin, ymin, xmax, ymax).
        """
        # Calculate boundary values based on scale and size
        xval = self.origin.x + self.scale.x * self.width
        yval = self.origin.y + self.scale.y * self.height
        # Calculate min and max values
        xmin = min(xval, self.origin.x)
        xmax = max(xval, self.origin.x)
        ymin = min(yval, self.origin.y)
        ymax = max(yval, self.origin.y)
        return xmin, ymin, xmax, ymax

    @property
    def bands(self):
        return BandList(self)

    def warp(self, ds_input, resampling='NearestNeighbour', max_error=0.0):
        """
        Returns a warped GDALRaster with the given input characteristics.

        The input is expected to be a dictionary containing the parameters
        of the target raster. Allowed values are width, height, SRID, origin,
        scale, skew, datatype, driver, and name (filename).

        By default, the warp function keeps all parameters equal to the values
        of the original source raster. For the name of the target raster, the
        name of the source raster will be used and appended with
        _copy. + source_driver_name.

        In addition, the resampling algorithm can be specified with the
        "resampling" input parameter. The default is NearestNeighbour. For a
        list of all options consult the GDAL_RESAMPLE_ALGORITHMS constant.
        """
        # Get the parameters defining the geotransform, srid, and size of the raster
        if 'width' not in ds_input:
            ds_input['width'] = self.width

        if 'height' not in ds_input:
            ds_input['height'] = self.height

        if 'srid' not in ds_input:
            ds_input['srid'] = self.srs.srid

        if 'origin' not in ds_input:
            ds_input['origin'] = self.origin

        if 'scale' not in ds_input:
            ds_input['scale'] = self.scale

        if 'skew' not in ds_input:
            ds_input['skew'] = self.skew

        # Get the driver, name, and datatype of the target raster
        if 'driver' not in ds_input:
            ds_input['driver'] = self.driver.name

        if 'name' not in ds_input:
            ds_input['name'] = self.name + '_copy.' + self.driver.name

        if 'datatype' not in ds_input:
            ds_input['datatype'] = self.bands[0].datatype()

        # Set the number of bands
        ds_input['nr_of_bands'] = len(self.bands)

        # Create target raster
        target = GDALRaster(ds_input, write=True)

        # Copy nodata values to warped raster
        for index, band in enumerate(self.bands):
            target.bands[index].nodata_value = band.nodata_value

        # Select resampling algorithm
        algorithm = GDAL_RESAMPLE_ALGORITHMS[resampling]

        # Reproject image
        capi.reproject_image(
            self._ptr, self.srs.wkt.encode(),
            target._ptr, target.srs.wkt.encode(),
            algorithm, 0.0, max_error,
            c_void_p(), c_void_p(), c_void_p()
        )

        # Make sure all data is written to file
        target._flush()

        return target

    def transform(self, srid, driver=None, name=None,
                  resampling='NearestNeighbour', max_error=0.0):
        """
        Returns a copy of this raster reprojected into the given SRID.
        """
        # Convert the resampling algorithm name into an algorithm id
        algorithm = GDAL_RESAMPLE_ALGORITHMS[resampling]

        # Instantiate target spatial reference system
        target_srs = SpatialReference(srid)

        # Create warped virtual dataset in the target reference system
        target = capi.auto_create_warped_vrt(
            self._ptr, self.srs.wkt.encode(), target_srs.wkt.encode(),
            algorithm, max_error, c_void_p()
        )
        target = GDALRaster(target)

        # Construct the target warp dictionary from the virtual raster
        data = {
            'srid': srid,
            'width': target.width,
            'height': target.height,
            'origin': [target.origin.x, target.origin.y],
            'scale': [target.scale.x, target.scale.y],
            'skew': [target.skew.x, target.skew.y],
        }

        # Set the driver and filepath if provided
        if driver:
            data['driver'] = driver

        if name:
            data['name'] = name

        # Warp the raster into new srid
        return self.warp(data, resampling=resampling, max_error=max_error)
gpl-3.0
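A hedged sketch of the dict-based constructor and transform() described above (requires GDAL to be installed; the sizes and SRIDs are illustrative):

from django.contrib.gis.gdal import GDALRaster

# In-memory 2x2 raster: the driver defaults to 'MEM', so no filename is needed.
rst = GDALRaster({
    'width': 2, 'height': 2, 'srid': 4326,
    'origin': [0, 0], 'scale': [1, -1],
    'bands': [{'data': [0, 1, 2, 3]}],
})
print(rst.extent)                  # (0.0, -2.0, 2.0, 0.0)

reprojected = rst.transform(3857)  # warped copy in Web Mercator
print(reprojected.srs.srid)        # 3857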
sorenk/ansible
lib/ansible/modules/utilities/logic/assert.py
67
1396
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright 2012 Dag Wieers <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'core'}

DOCUMENTATION = '''
---
module: assert
short_description: Asserts given expressions are true
description:
     - This module asserts that given expressions are true with an optional custom message.
     - This module is also supported for Windows targets.
version_added: "1.5"
options:
  that:
    description:
      - "A string expression of the same form that can be passed to the 'when' statement"
      - "Alternatively, a list of string expressions"
    required: true
  msg:
    description:
      - "The customized message used for a failing assertion"
notes:
     - This module is also supported for Windows targets.
author:
    - "Ansible Core Team"
    - "Michael DeHaan"
'''

EXAMPLES = '''
- assert: { that: "ansible_os_family != 'RedHat'" }

- assert:
    that:
      - "'foo' in some_command_result.stdout"
      - "number_of_the_counting == 3"

- assert:
    that:
      - "my_param <= 100"
      - "my_param >= 0"
    msg: "'my_param' must be between 0 and 100"
'''
gpl-3.0
mavenlin/tensorflow
tensorflow/tools/docs/doc_generator_visitor_test.py
118
4695
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tools.docs.doc_generator_visitor."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.platform import googletest
from tensorflow.tools.docs import doc_generator_visitor


class DocGeneratorVisitorTest(googletest.TestCase):

  def test_call_module(self):
    visitor = doc_generator_visitor.DocGeneratorVisitor()
    visitor(
        'doc_generator_visitor', doc_generator_visitor,
        [('DocGeneratorVisitor', doc_generator_visitor.DocGeneratorVisitor)])

    self.assertEqual({'doc_generator_visitor': ['DocGeneratorVisitor']},
                     visitor.tree)
    self.assertEqual({
        'doc_generator_visitor': doc_generator_visitor,
        'doc_generator_visitor.DocGeneratorVisitor':
        doc_generator_visitor.DocGeneratorVisitor,
    }, visitor.index)

  def test_call_class(self):
    visitor = doc_generator_visitor.DocGeneratorVisitor()
    visitor(
        'DocGeneratorVisitor', doc_generator_visitor.DocGeneratorVisitor,
        [('index', doc_generator_visitor.DocGeneratorVisitor.index)])

    self.assertEqual({'DocGeneratorVisitor': ['index']}, visitor.tree)
    self.assertEqual({
        'DocGeneratorVisitor': doc_generator_visitor.DocGeneratorVisitor,
        'DocGeneratorVisitor.index':
        doc_generator_visitor.DocGeneratorVisitor.index
    }, visitor.index)

  def test_call_raises(self):
    visitor = doc_generator_visitor.DocGeneratorVisitor()
    with self.assertRaises(RuntimeError):
      visitor('non_class_or_module', 'non_class_or_module_object', [])

  def test_duplicates(self):
    visitor = doc_generator_visitor.DocGeneratorVisitor()
    visitor(
        'submodule.DocGeneratorVisitor',
        doc_generator_visitor.DocGeneratorVisitor,
        [('index', doc_generator_visitor.DocGeneratorVisitor.index),
         ('index2', doc_generator_visitor.DocGeneratorVisitor.index)])
    visitor(
        'submodule2.DocGeneratorVisitor',
        doc_generator_visitor.DocGeneratorVisitor,
        [('index', doc_generator_visitor.DocGeneratorVisitor.index),
         ('index2', doc_generator_visitor.DocGeneratorVisitor.index)])
    visitor(
        'DocGeneratorVisitor2',
        doc_generator_visitor.DocGeneratorVisitor,
        [('index', doc_generator_visitor.DocGeneratorVisitor.index),
         ('index2', doc_generator_visitor.DocGeneratorVisitor.index)])

    # The shorter path should be master, or if equal, the lexicographically
    # first will be.
    self.assertEqual(
        {'DocGeneratorVisitor2': sorted(['submodule.DocGeneratorVisitor',
                                         'submodule2.DocGeneratorVisitor',
                                         'DocGeneratorVisitor2']),
         'DocGeneratorVisitor2.index': sorted([
             'submodule.DocGeneratorVisitor.index',
             'submodule.DocGeneratorVisitor.index2',
             'submodule2.DocGeneratorVisitor.index',
             'submodule2.DocGeneratorVisitor.index2',
             'DocGeneratorVisitor2.index',
             'DocGeneratorVisitor2.index2'
         ]),
        }, visitor.duplicates)
    self.assertEqual({
        'submodule.DocGeneratorVisitor': 'DocGeneratorVisitor2',
        'submodule.DocGeneratorVisitor.index': 'DocGeneratorVisitor2.index',
        'submodule.DocGeneratorVisitor.index2': 'DocGeneratorVisitor2.index',
        'submodule2.DocGeneratorVisitor': 'DocGeneratorVisitor2',
        'submodule2.DocGeneratorVisitor.index': 'DocGeneratorVisitor2.index',
        'submodule2.DocGeneratorVisitor.index2': 'DocGeneratorVisitor2.index',
        'DocGeneratorVisitor2.index2': 'DocGeneratorVisitor2.index'
    }, visitor.duplicate_of)
    self.assertEqual({
        id(doc_generator_visitor.DocGeneratorVisitor): 'DocGeneratorVisitor2',
        id(doc_generator_visitor.DocGeneratorVisitor.index):
        'DocGeneratorVisitor2.index',
    }, visitor.reverse_index)


if __name__ == '__main__':
  googletest.main()
apache-2.0
drexly/tonginBlobStore
lib/django/conf/global_settings.py
56
22768
# Default Django settings. Override these with settings in the module
# pointed-to by the DJANGO_SETTINGS_MODULE environment variable.

# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
gettext_noop = lambda s: s

####################
# CORE             #
####################

DEBUG = False
TEMPLATE_DEBUG = False

# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False

# Whether to use the "Etag" header. This saves bandwidth but slows down performance.
USE_ETAGS = False

# People who get code error notifications.
# In the format [('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com')]
ADMINS = []

# List of IP addresses, as strings, that:
#   * See debug comments, when DEBUG is true
#   * Receive x-headers
INTERNAL_IPS = []

# Hosts/domain names that are valid for this site.
# "*" matches anything, ".example.com" matches example.com and all subdomains
ALLOWED_HOSTS = []

# Local time zone for this installation. All choices can be found here:
# https://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'

# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

# Languages we provide translations for, out of the box.
LANGUAGES = [
    ('af', gettext_noop('Afrikaans')),
    ('ar', gettext_noop('Arabic')),
    ('ast', gettext_noop('Asturian')),
    ('az', gettext_noop('Azerbaijani')),
    ('bg', gettext_noop('Bulgarian')),
    ('be', gettext_noop('Belarusian')),
    ('bn', gettext_noop('Bengali')),
    ('br', gettext_noop('Breton')),
    ('bs', gettext_noop('Bosnian')),
    ('ca', gettext_noop('Catalan')),
    ('cs', gettext_noop('Czech')),
    ('cy', gettext_noop('Welsh')),
    ('da', gettext_noop('Danish')),
    ('de', gettext_noop('German')),
    ('el', gettext_noop('Greek')),
    ('en', gettext_noop('English')),
    ('en-au', gettext_noop('Australian English')),
    ('en-gb', gettext_noop('British English')),
    ('eo', gettext_noop('Esperanto')),
    ('es', gettext_noop('Spanish')),
    ('es-ar', gettext_noop('Argentinian Spanish')),
    ('es-co', gettext_noop('Colombian Spanish')),
    ('es-mx', gettext_noop('Mexican Spanish')),
    ('es-ni', gettext_noop('Nicaraguan Spanish')),
    ('es-ve', gettext_noop('Venezuelan Spanish')),
    ('et', gettext_noop('Estonian')),
    ('eu', gettext_noop('Basque')),
    ('fa', gettext_noop('Persian')),
    ('fi', gettext_noop('Finnish')),
    ('fr', gettext_noop('French')),
    ('fy', gettext_noop('Frisian')),
    ('ga', gettext_noop('Irish')),
    ('gd', gettext_noop('Scottish Gaelic')),
    ('gl', gettext_noop('Galician')),
    ('he', gettext_noop('Hebrew')),
    ('hi', gettext_noop('Hindi')),
    ('hr', gettext_noop('Croatian')),
    ('hu', gettext_noop('Hungarian')),
    ('ia', gettext_noop('Interlingua')),
    ('id', gettext_noop('Indonesian')),
    ('io', gettext_noop('Ido')),
    ('is', gettext_noop('Icelandic')),
    ('it', gettext_noop('Italian')),
    ('ja', gettext_noop('Japanese')),
    ('ka', gettext_noop('Georgian')),
    ('kk', gettext_noop('Kazakh')),
    ('km', gettext_noop('Khmer')),
    ('kn', gettext_noop('Kannada')),
    ('ko', gettext_noop('Korean')),
    ('lb', gettext_noop('Luxembourgish')),
    ('lt', gettext_noop('Lithuanian')),
    ('lv', gettext_noop('Latvian')),
    ('mk', gettext_noop('Macedonian')),
    ('ml', gettext_noop('Malayalam')),
    ('mn', gettext_noop('Mongolian')),
    ('mr', gettext_noop('Marathi')),
    ('my', gettext_noop('Burmese')),
    ('nb', gettext_noop('Norwegian Bokmal')),
    ('ne', gettext_noop('Nepali')),
    ('nl', gettext_noop('Dutch')),
    ('nn', gettext_noop('Norwegian Nynorsk')),
    ('os', gettext_noop('Ossetic')),
    ('pa', gettext_noop('Punjabi')),
    ('pl', gettext_noop('Polish')),
    ('pt', gettext_noop('Portuguese')),
    ('pt-br', gettext_noop('Brazilian Portuguese')),
    ('ro', gettext_noop('Romanian')),
    ('ru', gettext_noop('Russian')),
    ('sk', gettext_noop('Slovak')),
    ('sl', gettext_noop('Slovenian')),
    ('sq', gettext_noop('Albanian')),
    ('sr', gettext_noop('Serbian')),
    ('sr-latn', gettext_noop('Serbian Latin')),
    ('sv', gettext_noop('Swedish')),
    ('sw', gettext_noop('Swahili')),
    ('ta', gettext_noop('Tamil')),
    ('te', gettext_noop('Telugu')),
    ('th', gettext_noop('Thai')),
    ('tr', gettext_noop('Turkish')),
    ('tt', gettext_noop('Tatar')),
    ('udm', gettext_noop('Udmurt')),
    ('uk', gettext_noop('Ukrainian')),
    ('ur', gettext_noop('Urdu')),
    ('vi', gettext_noop('Vietnamese')),
    ('zh-hans', gettext_noop('Simplified Chinese')),
    ('zh-hant', gettext_noop('Traditional Chinese')),
]

# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ["he", "ar", "fa", "ur"]

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = []

# Settings for language cookie
LANGUAGE_COOKIE_NAME = 'django_language'
LANGUAGE_COOKIE_AGE = None
LANGUAGE_COOKIE_DOMAIN = None
LANGUAGE_COOKIE_PATH = '/'

# If you set this to True, Django will format dates, numbers and calendars
# according to user current locale.
USE_L10N = False

# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS

# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'

# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'

# Email address that error messages come from.
SERVER_EMAIL = 'root@localhost'

# Database connection info. If left empty, will default to the dummy backend.
DATABASES = {}

# Classes used to implement DB routing behavior.
DATABASE_ROUTERS = []

# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'

# Host for sending email.
EMAIL_HOST = 'localhost'

# Port for sending email.
EMAIL_PORT = 25

# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
EMAIL_USE_SSL = False
EMAIL_SSL_CERTFILE = None
EMAIL_SSL_KEYFILE = None
EMAIL_TIMEOUT = None

# List of strings representing installed apps.
INSTALLED_APPS = []

# List of locations of the template source files, in search order.
TEMPLATE_DIRS = []

# List of callables that know how to import templates from various sources.
# See the comments in django/core/template/loader.py for interface
# documentation.
TEMPLATE_LOADERS = [
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
]

# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = [
    'django.contrib.auth.context_processors.auth',
    'django.template.context_processors.debug',
    'django.template.context_processors.i18n',
    'django.template.context_processors.media',
    'django.template.context_processors.static',
    'django.template.context_processors.tz',
    # 'django.template.context_processors.request',
    'django.contrib.messages.context_processors.messages',
]

# Output to use in template system for invalid (e.g. misspelled) variables.
TEMPLATE_STRING_IF_INVALID = ''

TEMPLATES = []

# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'

# Subject-line prefix for email messages send with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '

# Whether to append trailing slashes to URLs.
APPEND_SLASH = True

# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False

# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None

# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
#     import re
#     DISALLOWED_USER_AGENTS = [
#         re.compile(r'^NaverBot.*'),
#         re.compile(r'^EmailSiphon.*'),
#         re.compile(r'^SiteSucker.*'),
#         re.compile(r'^sohu-search')
#     ]
DISALLOWED_USER_AGENTS = []

ABSOLUTE_URL_OVERRIDES = {}

# List of strings representing allowed prefixes for the {% ssi %} tag.
# Example: ['/home/html', '/var/www']
ALLOWED_INCLUDE_ROOTS = []

# List of compiled regular expression objects representing URLs that need not
# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:
#    import re
#    IGNORABLE_404_URLS = [
#        re.compile(r'^/apple-touch-icon.*\.png$'),
#        re.compile(r'^/favicon.ico$'),
#        re.compile(r'^/robots.txt$'),
#        re.compile(r'^/phpmyadmin/'),
#        re.compile(r'\.(cgi|php|pl)$'),
#    ]
IGNORABLE_404_URLS = []

# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''

# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''

# Absolute path to the directory static files should be collected to.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = None

# URL that handles the static files served from STATIC_ROOT.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = None

# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = [
    'django.core.files.uploadhandler.MemoryFileUploadHandler',
    'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]

# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440  # i.e. 2.5 MB

# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None

# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_PERMISSIONS = None

# The numeric mode to assign to newly-created directories, when uploading files.
# The value should be a mode as you'd pass to os.chmod;
# see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_DIRECTORY_PERMISSIONS = None

# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None

# Default formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'

# Default formatting for datetime objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'

# Default formatting for time objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'

# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'

# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'

# Default short formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'

# Default short formatting for datetime objects.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'

# Default formats to be used when parsing dates from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = [
    '%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y',  # '2006-10-25', '10/25/2006', '10/25/06'
    '%b %d %Y', '%b %d, %Y',             # 'Oct 25 2006', 'Oct 25, 2006'
    '%d %b %Y', '%d %b, %Y',             # '25 Oct 2006', '25 Oct, 2006'
    '%B %d %Y', '%B %d, %Y',             # 'October 25 2006', 'October 25, 2006'
    '%d %B %Y', '%d %B, %Y',             # '25 October 2006', '25 October, 2006'
]

# Default formats to be used when parsing times from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = [
    '%H:%M:%S',     # '14:30:59'
    '%H:%M:%S.%f',  # '14:30:59.000200'
    '%H:%M',        # '14:30'
]

# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = [
    '%Y-%m-%d %H:%M:%S',     # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f',  # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M',        # '2006-10-25 14:30'
    '%Y-%m-%d',              # '2006-10-25'
    '%m/%d/%Y %H:%M:%S',     # '10/25/2006 14:30:59'
    '%m/%d/%Y %H:%M:%S.%f',  # '10/25/2006 14:30:59.000200'
    '%m/%d/%Y %H:%M',        # '10/25/2006 14:30'
    '%m/%d/%Y',              # '10/25/2006'
    '%m/%d/%y %H:%M:%S',     # '10/25/06 14:30:59'
    '%m/%d/%y %H:%M:%S.%f',  # '10/25/06 14:30:59.000200'
    '%m/%d/%y %H:%M',        # '10/25/06 14:30'
    '%m/%d/%y',              # '10/25/06'
]

# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0

# Decimal separator symbol
DECIMAL_SEPARATOR = '.'

# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False

# Number of digits that will be together, when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0

# Thousand separator symbol
THOUSAND_SEPARATOR = ','

# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''

# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'SAMEORIGIN'

USE_X_FORWARDED_HOST = False
USE_X_FORWARDED_PORT = False

# The Python dotted path to the WSGI application that Django's internal server
# (runserver) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None

# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# a tuple of (header_name, header_value). For any requests that come in with
# that header/value, request.is_secure() will return True.
# WARNING! Only set this if you fully understand what you're doing. Otherwise,
# you may be opening yourself up to a security risk.
SECURE_PROXY_SSL_HEADER = None

##############
# MIDDLEWARE #
##############

# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = [
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
]

############
# SESSIONS #
############

# Cache to store session data if using the cache session backend.
SESSION_CACHE_ALIAS = 'default'
# Cookie name. This can be whatever you want.
SESSION_COOKIE_NAME = 'sessionid'
# Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2
# A string like ".example.com", or None for standard domain cookie.
SESSION_COOKIE_DOMAIN = None
# Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_SECURE = False
# The path of the session cookie.
SESSION_COOKIE_PATH = '/'
# Whether to use the non-RFC standard httpOnly flag (IE, FF3+, others)
SESSION_COOKIE_HTTPONLY = True
# Whether to save the session data on every request.
SESSION_SAVE_EVERY_REQUEST = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# The module to store session data
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
# Directory to store session files if using the file session module. If None,
# the backend will use a sensible default.
SESSION_FILE_PATH = None
# class to serialize session data
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'

#########
# CACHE #
#########

# The cache backends to use.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    }
}
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'

##################
# AUTHENTICATION #
##################

AUTH_USER_MODEL = 'auth.User'

AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']

LOGIN_URL = '/accounts/login/'

LOGOUT_URL = '/accounts/logout/'

LOGIN_REDIRECT_URL = '/accounts/profile/'

# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3

# the first hasher in this list is the preferred algorithm.  any
# password using different algorithms will be converted automatically
# upon login
PASSWORD_HASHERS = [
    'django.contrib.auth.hashers.PBKDF2PasswordHasher',
    'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
    'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
    'django.contrib.auth.hashers.BCryptPasswordHasher',
    'django.contrib.auth.hashers.SHA1PasswordHasher',
    'django.contrib.auth.hashers.MD5PasswordHasher',
    'django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher',
    'django.contrib.auth.hashers.UnsaltedMD5PasswordHasher',
    'django.contrib.auth.hashers.CryptPasswordHasher',
]

AUTH_PASSWORD_VALIDATORS = []

###########
# SIGNING #
###########

SIGNING_BACKEND = 'django.core.signing.TimestampSigner'

########
# CSRF #
########

# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'

# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = False
CSRF_HEADER_NAME = 'HTTP_X_CSRFTOKEN'
CSRF_TRUSTED_ORIGINS = []

############
# MESSAGES #
############

# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'

# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.

###########
# LOGGING #
###########

# The callable to use to configure logging
LOGGING_CONFIG = 'logging.config.dictConfig'

# Custom logging configuration.
LOGGING = {}

# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'

###########
# TESTING #
###########

# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.runner.DiscoverRunner'

# Apps that don't need to be serialized at test database creation time
# (only apps with migrations are to start with)
TEST_NON_SERIALIZED_APPS = []

############
# FIXTURES #
############

# The list of directories to search for fixtures
FIXTURE_DIRS = []

###############
# STATICFILES #
###############

# A list of locations of additional static files
STATICFILES_DIRS = []

# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]

##############
# MIGRATIONS #
##############

# Migration module overrides for apps, by app label.
MIGRATION_MODULES = {}

#################
# SYSTEM CHECKS #
#################

# List of all issues generated by system checks that should be silenced. Light
# issues like warnings, infos or debugs will not generate a message. Silencing
# serious issues like errors and criticals does not result in hiding the
# message, but Django will not stop you from e.g. running server.
SILENCED_SYSTEM_CHECKS = []

#######################
# SECURITY MIDDLEWARE #
#######################
SECURE_BROWSER_XSS_FILTER = False
SECURE_CONTENT_TYPE_NOSNIFF = False
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_HSTS_SECONDS = 0
SECURE_REDIRECT_EXEMPT = []
SECURE_SSL_HOST = None
SECURE_SSL_REDIRECT = False
bsd-3-clause
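As a quick illustration of how Django consumes the *_INPUT_FORMATS lists above, here is a minimal, self-contained sketch (plain datetime, no Django required) of the first-match parsing loop that form date fields effectively perform; the format subset is copied from DATE_INPUT_FORMATS above.

# Illustrative sketch only: Django's forms.DateField tries each entry of
# DATE_INPUT_FORMATS in order until one parses; this mimics that loop.
from datetime import datetime

DATE_INPUT_FORMATS = ['%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y',
                      '%b %d %Y', '%b %d, %Y']

def parse_date(value, formats=DATE_INPUT_FORMATS):
    for fmt in formats:
        try:
            return datetime.strptime(value, fmt).date()
        except ValueError:
            continue  # try the next configured format
    raise ValueError('%r does not match any configured format' % value)

# ISO and US-style inputs resolve to the same date:
assert parse_date('2006-10-25') == parse_date('10/25/2006')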
guerrerocarlos/odoo
addons/hw_scale/__openerp__.py
82
1646
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

{
    'name': 'Weighing Scale Hardware Driver',
    'version': '1.0',
    'category': 'Hardware Drivers',
    'sequence': 6,
    'summary': 'Hardware Driver for Weighing Scales',
    'website': 'https://www.odoo.com/page/point-of-sale',
    'description': """
Weighing Scale Hardware Driver
==============================

This module allows the point of sale to connect to a scale using a USB HSM
Serial Scale Interface, such as the Mettler Toledo Ariva.

""",
    'author': 'OpenERP SA',
    'depends': ['hw_proxy'],
    'test': [],
    'installable': True,
    'auto_install': False,
}

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
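A manifest like the one above is a plain Python dict literal, so it can be inspected without importing or executing the addon (OpenERP itself loads manifests this way). A minimal sketch; the path in the commented usage is hypothetical:

# Sketch: read an addon manifest without executing module code. Because the
# file body is a single dict literal, ast.literal_eval is sufficient.
import ast

def read_manifest(path):
    with open(path) as f:
        return ast.literal_eval(f.read())

# Hypothetical usage, assuming an addons checkout on disk:
# info = read_manifest('addons/hw_scale/__openerp__.py')
# print(info['depends'])  # -> ['hw_proxy']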
xpansa/stock-logistics-workflow
stock_dropshipping_dual_invoice/wizard/stock_invoice_onshipping.py
15
5807
# Author: Leonardo Pistone
# Copyright 2015 Camptocamp SA
# Contributor: Pedro M. Baeza
# Copyright 2015 Serv. Tecnol. Avanzados
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from openerp import models, api, fields
from openerp.tools.translate import _


class StockInvoiceOnshipping(models.TransientModel):
    _inherit = "stock.invoice.onshipping"

    @api.model
    def _get_journal_type(self):
        res_id = self.env.context.get('active_id', False)
        picking = self.env['stock.picking'].browse(res_id)
        if picking.move_lines:
            src_usage = picking.move_lines[0].location_id.usage
            dest_usage = picking.move_lines[0].location_dest_id.usage
            if src_usage == 'supplier' and dest_usage == 'customer':
                moves = picking.move_lines.filtered('purchase_line_id')[:1]
                pick_purchase = (
                    moves.purchase_line_id.order_id.invoice_method ==
                    'picking')
                return "purchase" if pick_purchase else "sale"
        return super(StockInvoiceOnshipping, self)._get_journal_type()

    def _default_second_journal(self):
        res = self.env['account.journal'].search([('type', '=', 'sale')])
        return res and res[0] or False

    def _need_two_invoices(self):
        if 'active_id' in self.env.context:
            picking = self.env['stock.picking'].browse(
                self.env.context['active_id'])
            so = picking.sale_id
            moves = picking.move_lines.filtered('purchase_line_id')[:1]
            if (so.order_policy == 'picking' and
                    moves.purchase_line_id.order_id.invoice_method ==
                    'picking'):
                # MTO pickings are also associated to both a sales and purchase
                # order line, so we still need to investigate if this is an MTO
                # or drop-shipping
                p_type = picking.picking_type_id
                return (p_type.default_location_src_id.usage == 'supplier' and
                        p_type.default_location_dest_id.usage == 'customer')
        return False

    @api.depends('journal_type', 'need_two_invoices')
    def _get_wizard_title(self):
        if self.need_two_invoices:
            self.wizard_title = _("Create Two Invoices")
        else:
            selection = dict(self.fields_get()['journal_type']['selection'])
            journal_type = self._get_journal_type()
            self.wizard_title = selection[journal_type]

    @api.multi
    def open_invoice(self):
        res = super(StockInvoiceOnshipping, self).open_invoice()
        if self.need_two_invoices:
            res['view_id'] = self.env.ref('account.invoice_tree').id
            res['name'] = _('Invoices')
            res['view_mode'] = 'tree'
            del res['views']
            del res['display_name']
        return res

    @api.multi
    def create_invoice(self):
        if self.need_two_invoices:
            picking_ids = self.env.context['active_ids']
            picking_model = self.env['stock.picking']
            pickings = picking_model.browse(picking_ids)
            # Group picking by customer
            pickings_by_partner = {}
            for picking in pickings:
                if not pickings_by_partner.get(picking.partner_id):
                    pickings_by_partner[picking.partner_id] = picking_model
                pickings_by_partner[picking.partner_id] += picking
            first_invoice_ids = []
            for partner, pickings_grouped in pickings_by_partner.iteritems():
                first_invoice_ids += pickings_grouped.with_context(
                    partner_to_invoice_id=partner.id,
                    date_inv=self.invoice_date,
                    inv_type='in_invoice',
                ).action_invoice_create(
                    journal_id=self.journal_id.id,
                    group=self.group,
                    type='in_invoice',
                )
            # Allow to invoice again
            pickings.mapped('move_lines').filtered(
                lambda x: x.invoice_state == 'invoiced').write(
                {'invoice_state': '2binvoiced'})
            second_invoice_ids = pickings.with_context(
                date_inv=self.invoice_date,
                inv_type='out_invoice',
            ).action_invoice_create(
                journal_id=self.second_journal_id.id,
                group=self.group,
                type='out_invoice',
            )
            return first_invoice_ids + second_invoice_ids
        else:
            return super(StockInvoiceOnshipping, self).create_invoice()

    need_two_invoices = fields.Boolean('Need two invoices',
                                       default=_need_two_invoices)
    second_journal_id = fields.Many2one('account.journal',
                                        'Second Destination Journal',
                                        default=_default_second_journal)
    wizard_title = fields.Char('Wizard Title', compute='_get_wizard_title',
                               readonly=True)
    journal_type = fields.Selection(default=_get_journal_type)
agpl-3.0
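The create_invoice override above hinges on accumulating pickings per partner before creating the supplier invoices. Detached from the ORM recordsets, the same pattern is an ordinary group-by accumulation; a plain-Python sketch with tuples standing in for browse records:

# Sketch of the group-by-partner step in create_invoice above; the picking
# names and partner keys are illustrative stand-ins for recordsets.
from collections import defaultdict

pickings = [('OUT/001', 'partner_a'), ('OUT/002', 'partner_b'),
            ('OUT/003', 'partner_a')]

pickings_by_partner = defaultdict(list)
for name, partner in pickings:
    pickings_by_partner[partner].append(name)

# One supplier-invoice pass per partner group, then one customer-invoice pass
# over all pickings, mirroring the two action_invoice_create() calls above.
for partner, group in sorted(pickings_by_partner.items()):
    print(partner, group)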
rajalokan/nova
nova/cmd/api.py
1
2267
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Starter script for Nova API.

Starts both the EC2 and OpenStack APIs in separate greenthreads.

"""

import sys

from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr

import nova.conf
from nova import config
from nova import exception
from nova.i18n import _LE, _LW
from nova import objects
from nova import service
from nova import utils
from nova import version

CONF = nova.conf.CONF


def main():
    config.parse_args(sys.argv)
    logging.setup(CONF, "nova")
    utils.monkey_patch()
    objects.register_all()
    if 'osapi_compute' in CONF.enabled_apis:
        # NOTE(mriedem): This is needed for caching the nova-compute service
        # version.
        objects.Service.enable_min_version_cache()
    log = logging.getLogger(__name__)

    gmr.TextGuruMeditation.setup_autorun(version)

    launcher = service.process_launcher()
    started = 0
    for api in CONF.enabled_apis:
        should_use_ssl = api in CONF.enabled_ssl_apis
        try:
            server = service.WSGIService(api, use_ssl=should_use_ssl)
            launcher.launch_service(server, workers=server.workers or 1)
            started += 1
        except exception.PasteAppNotFound as ex:
            log.warning(
                _LW("%s. ``enabled_apis`` includes bad values. "
                    "Fix to remove this warning."), ex)

    if started == 0:
        log.error(_LE('No APIs were started. '
                      'Check the enabled_apis config option.'))
        sys.exit(1)

    launcher.wait()
apache-2.0
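The loop in main() above decides per API whether to serve over SSL purely by membership in enabled_ssl_apis, and exits non-zero only when nothing could be launched. Stripped of the nova service machinery, the control flow reduces to the sketch below; the two option lists are example values, not nova defaults:

# Sketch of the per-API SSL decision and the started-count guard in main().
enabled_apis = ['osapi_compute', 'metadata']   # example values
enabled_ssl_apis = ['osapi_compute']           # example values

started = 0
for api in enabled_apis:
    should_use_ssl = api in enabled_ssl_apis
    print('%s -> use_ssl=%s' % (api, should_use_ssl))
    started += 1

if started == 0:
    raise SystemExit(1)  # mirrors sys.exit(1) when no API started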
tizianasellitto/servo
tests/wpt/web-platform-tests/tools/pytest/_pytest/pdb.py
176
3491
""" interactive debugging with PDB, the Python Debugger. """ from __future__ import absolute_import import pdb import sys import pytest def pytest_addoption(parser): group = parser.getgroup("general") group._addoption('--pdb', action="store_true", dest="usepdb", default=False, help="start the interactive Python debugger on errors.") def pytest_namespace(): return {'set_trace': pytestPDB().set_trace} def pytest_configure(config): if config.getvalue("usepdb"): config.pluginmanager.register(PdbInvoke(), 'pdbinvoke') old = (pdb.set_trace, pytestPDB._pluginmanager) def fin(): pdb.set_trace, pytestPDB._pluginmanager = old pytestPDB._config = None pdb.set_trace = pytest.set_trace pytestPDB._pluginmanager = config.pluginmanager pytestPDB._config = config config._cleanup.append(fin) class pytestPDB: """ Pseudo PDB that defers to the real pdb. """ _pluginmanager = None _config = None def set_trace(self): """ invoke PDB set_trace debugging, dropping any IO capturing. """ import _pytest.config frame = sys._getframe().f_back if self._pluginmanager is not None: capman = self._pluginmanager.getplugin("capturemanager") if capman: capman.suspendcapture(in_=True) tw = _pytest.config.create_terminal_writer(self._config) tw.line() tw.sep(">", "PDB set_trace (IO-capturing turned off)") self._pluginmanager.hook.pytest_enter_pdb(config=self._config) pdb.Pdb().set_trace(frame) class PdbInvoke: def pytest_exception_interact(self, node, call, report): capman = node.config.pluginmanager.getplugin("capturemanager") if capman: out, err = capman.suspendcapture(in_=True) sys.stdout.write(out) sys.stdout.write(err) _enter_pdb(node, call.excinfo, report) def pytest_internalerror(self, excrepr, excinfo): for line in str(excrepr).split("\n"): sys.stderr.write("INTERNALERROR> %s\n" %line) sys.stderr.flush() tb = _postmortem_traceback(excinfo) post_mortem(tb) def _enter_pdb(node, excinfo, rep): # XXX we re-use the TerminalReporter's terminalwriter # because this seems to avoid some encoding related troubles # for not completely clear reasons. tw = node.config.pluginmanager.getplugin("terminalreporter")._tw tw.line() tw.sep(">", "traceback") rep.toterminal(tw) tw.sep(">", "entering PDB") tb = _postmortem_traceback(excinfo) post_mortem(tb) rep._pdbshown = True return rep def _postmortem_traceback(excinfo): # A doctest.UnexpectedException is not useful for post_mortem. # Use the underlying exception instead: from doctest import UnexpectedException if isinstance(excinfo.value, UnexpectedException): return excinfo.value.exc_info[2] else: return excinfo._excinfo[2] def _find_last_non_hidden_frame(stack): i = max(0, len(stack) - 1) while i and stack[i][0].f_locals.get("__tracebackhide__", False): i -= 1 return i def post_mortem(t): class Pdb(pdb.Pdb): def get_stack(self, f, t): stack, i = pdb.Pdb.get_stack(self, f, t) if f is None: i = _find_last_non_hidden_frame(stack) return stack, i p = Pdb() p.reset() p.interaction(None, t)
mpl-2.0
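For context, the plugin above backs the two classic entry points visible in the code: pytest_addoption registers the --pdb flag, and pytest_namespace exposes set_trace on the pytest module. A usage sketch against that vintage of the pytest API:

# Usage sketch for the plugin above (old pytest API, as vendored here):
#
#   py.test --pdb      # drop into PDB when a test raises
#
# or programmatically, mid-test:
import pytest

def test_something():
    x = 41 + 1
    pytest.set_trace()  # suspends IO capture and enters PDB, per pytestPDB above
    assert x == 42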
jwhitlock/web-platform-compat
webplatformcompat/v1/routers.py
2
2122
# -*- coding: utf-8 -*-
"""URL router for v1 API endpoints."""

from ..routers import GroupedRouter
from .viewsets import (
    BrowserViewSet, FeatureViewSet, MaturityViewSet, ReferenceViewSet,
    SectionViewSet, SpecificationViewSet, SupportViewSet, VersionViewSet,
    HistoricalBrowserViewSet, HistoricalFeatureViewSet,
    HistoricalMaturityViewSet, HistoricalReferenceViewSet,
    HistoricalSectionViewSet, HistoricalSpecificationViewSet,
    HistoricalSupportViewSet, HistoricalVersionViewSet,
    ChangesetViewSet, UserViewSet, ViewFeaturesViewSet)

router = GroupedRouter(trailing_slash=False, version='v1')

router.register(r'browsers', BrowserViewSet, group='resources')
router.register(r'versions', VersionViewSet, group='resources')
router.register(r'features', FeatureViewSet, group='resources')
router.register(r'supports', SupportViewSet, group='resources')
router.register(r'specifications', SpecificationViewSet, group='resources')
router.register(r'maturities', MaturityViewSet, group='resources')
router.register(r'sections', SectionViewSet, group='resources')
router.register(r'references', ReferenceViewSet, group='resources')
router.register(r'changesets', ChangesetViewSet, group='change_control')
router.register(r'users', UserViewSet, group='change_control')
router.register(
    r'historical_browsers', HistoricalBrowserViewSet, group='history')
router.register(
    r'historical_versions', HistoricalVersionViewSet, group='history')
router.register(
    r'historical_features', HistoricalFeatureViewSet, group='history')
router.register(
    r'historical_supports', HistoricalSupportViewSet, group='history')
router.register(
    r'historical_specifications', HistoricalSpecificationViewSet,
    group='history')
router.register(
    r'historical_maturities', HistoricalMaturityViewSet, group='history')
router.register(
    r'historical_sections', HistoricalSectionViewSet, group='history')
router.register(
    r'historical_references', HistoricalReferenceViewSet, group='history')
router.register(
    r'view_features', ViewFeaturesViewSet, base_name='viewfeatures',
    group='views')
mpl-2.0
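Assuming GroupedRouter behaves like a standard Django REST Framework router (the registrations above only add grouping metadata on top of the usual register() signature), the registered viewsets are exposed by including router.urls in a URLconf. A minimal sketch in the Django 1.x style of this era:

# Sketch: wiring the v1 router above into a project URLconf, assuming
# GroupedRouter exposes .urls like a stock DRF router.
from django.conf.urls import include, url

from webplatformcompat.v1.routers import router as v1_router

urlpatterns = [
    url(r'^api/v1/', include(v1_router.urls)),
]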
lthall/Leonard_ardupilot
Tools/autotest/param_metadata/xmlemit_mp.py
10
3219
#!/usr/bin/env python

from xml.sax.saxutils import escape, quoteattr

from emit import Emit
from param import known_param_fields, known_units
from lxml import etree


# Emit ArduPilot documentation in a machine-readable XML format for Mission Planner
class XmlEmitMP(Emit):
    def __init__(self, *args, **kwargs):
        Emit.__init__(self, *args, **kwargs)
        self.mp_fname = 'ParameterMetaData.xml'
        self.f = open(self.mp_fname, mode='w')
        self.preamble = '''<?xml version="1.0" encoding="utf-8"?>\n'''
        self.f.write(self.preamble)
        self.f.write('<Params>\n')
        self.gname = None
        self.skip_name = False

    def close(self):
        self.f.write('  </%s>\n' % self.gname)
        self.f.write('''</Params>\n''')
        self.f.close()
        # sort and reformat XML
        parser = etree.XMLParser(remove_blank_text=True)
        tree = etree.parse(self.mp_fname, parser)
        root = tree.getroot()
        vehicle = tree.find(self.gname)
        sort_xml_node(vehicle)
        sorted_unicode = etree.tostring(root, pretty_print=True,
                                        encoding='unicode')
        f = open(self.mp_fname, mode='w')
        f.write(self.preamble)
        f.write(sorted_unicode)
        f.close()

    def emit_comment(self, s):
        self.f.write("<!-- " + s + " -->")

    def start_libraries(self):
        self.skip_name = True

    def emit(self, g):
        t = ""
        if not self.skip_name:
            self.gname = g.reference
            if self.gname == "ArduCopter":
                self.gname = "ArduCopter2"
            if self.gname == "APMrover2" or self.gname == "Rover":
                self.gname = "ArduRover"
            t = '  <%s>\n' % self.gname

        for param in g.params:
            # Begin our parameter node
            # Get param name and remove key
            name = param.name.split(':')[-1]
            t += '    <%s>\n' % name
            if hasattr(param, 'DisplayName'):
                t += '      <DisplayName>%s</DisplayName>\n' % param.DisplayName
            if hasattr(param, 'Description'):
                t += '      <Description>%s</Description>\n' % escape(param.Description)  # i.e. parameter docs
            if hasattr(param, 'User'):
                t += '      <User>%s</User>\n' % param.User  # i.e. Standard or Advanced
            # not used yet
            # if hasattr(param, 'Calibration'):
            #     t += '      <Calibration>%s</Calibration>\n' % quoteattr(param.Calibration)

            # Add values as children of this node
            for field in param.__dict__.keys():
                if field not in ['name', 'DisplayName', 'Description', 'User'] and field in known_param_fields:
                    t += '      <%s>%s</%s>\n' % (field, escape(param.__dict__[field]), field)
            t += '    </%s>\n' % name

        # print t
        self.f.write(t)


def sort_xml_node(node):
    if not isinstance(node.tag, str):
        # not a TAG, it is a comment or DATA
        # no need to sort
        return

    # sort children by tag name
    node[:] = sorted(node, key=lambda field: field.tag)

    # and recurse
    for child in node:
        child[:] = sorted(child, key=lambda field: field.tag)
gpl-3.0
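sort_xml_node() above reorders sibling elements by tag name using lxml's slice assignment, then recurses one level. Its effect on a small document, as a self-contained sketch:

# Self-contained demonstration of the sibling sort performed by
# sort_xml_node() above.
from lxml import etree

root = etree.fromstring('<Params><B/><A/><C/></Params>')
root[:] = sorted(root, key=lambda field: field.tag)
print(etree.tostring(root))  # b'<Params><A/><B/><C/></Params>'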
gautam1858/tensorflow
tensorflow/contrib/quantize/python/fold_batch_norms_test.py
17
34203
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Unit tests for folding batch norm layers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.layers.python.layers import layers from tensorflow.contrib.quantize.python import fold_batch_norms from tensorflow.python.client import session from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import gradients from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import googletest from tensorflow.python.training import saver as saver_lib batch_norm = layers.batch_norm conv2d = layers.conv2d fully_connected = layers.fully_connected separable_conv2d = layers.separable_conv2d # TODO(suharshs): Use parameterized test once OSS TF supports it. class FoldBatchNormsTest(test_util.TensorFlowTestCase): def _RunTestOverParameters(self, test_fn): parameters_list = [ # (relu, relu_op_name, with_bypass, has_scaling, fused_batch_norm, # freeze_batch_norm_delay, insert identity node) (nn_ops.relu6, 'Relu6', False, False, False, 100, False), (nn_ops.relu, 'Relu', False, False, False, None, False), (nn_ops.relu6, 'Relu6', True, False, False, 100, False), (nn_ops.relu, 'Relu', True, False, False, None, False), (nn_ops.relu6, 'Relu6', False, True, False, 100, False), (nn_ops.relu, 'Relu', False, True, False, None, False), (nn_ops.relu6, 'Relu6', True, True, False, 100, False), (nn_ops.relu, 'Relu', True, True, False, None, False), # Fused batch norm always has scaling enabled. (nn_ops.relu6, 'Relu6', False, True, True, None, False), (nn_ops.relu, 'Relu', False, True, True, 100, False), (nn_ops.relu6, 'Relu6', True, True, True, None, False), (nn_ops.relu, 'Relu', True, True, True, 100, False), (nn_ops.relu6, 'Relu6', False, True, True, None, True), (nn_ops.relu, 'Relu', False, True, True, 100, True), (nn_ops.relu6, 'Relu6', True, True, True, None, True), (nn_ops.relu, 'Relu', True, True, True, 100, True), ] for params in parameters_list: test_fn(params[0], params[1], params[2], params[3], params[4], params[5], params[6]) def _TestFoldConv2d(self, relu, relu_op_name, with_bypass, has_scaling, fused_batch_norm, freeze_batch_norm_delay, insert_identity_node): """Tests folding cases: inputs -> Conv2d with batch norm -> Relu*. Args: relu: Callable that returns an Operation, a factory method for the Relu*. relu_op_name: String, name of the Relu* operation. 
with_bypass: Bool, when true there is an extra connection added from inputs to just before Relu*. has_scaling: Bool, when true the batch norm has scaling. fused_batch_norm: Bool, when true the batch norm is fused. freeze_batch_norm_delay: None or the number of steps after which training switches to using frozen mean and variance insert_identity_node: Bool, insert identity node between conv and batch norm """ g = ops.Graph() with g.as_default(): batch_size, height, width = 5, 128, 128 inputs = array_ops.zeros((batch_size, height, width, 3)) out_depth = 3 if with_bypass else 32 stride = 1 if with_bypass else 2 activation_fn = None if with_bypass else relu name = 'test/test2' if with_bypass else 'test' if insert_identity_node: with g.name_scope(name): node = conv2d( inputs, out_depth, [5, 5], stride=stride, padding='SAME', weights_initializer=self._WeightInit(0.09), activation_fn=None, normalizer_fn=None, biases_initializer=None) conv_out = array_ops.identity(node, name='conv_out') node = batch_norm( conv_out, center=True, scale=has_scaling, decay=1.0 - 0.003, fused=fused_batch_norm) if activation_fn is not None: node = activation_fn(node) conv_name = name + '/Conv' else: node = conv2d( inputs, out_depth, [5, 5], stride=stride, padding='SAME', weights_initializer=self._WeightInit(0.09), activation_fn=activation_fn, normalizer_fn=batch_norm, normalizer_params=self._BatchNormParams( scale=has_scaling, fused=fused_batch_norm), scope=name) conv_name = name if with_bypass: node = math_ops.add(inputs, node, name='test/Add') relu(node, name='test/' + relu_op_name) fold_batch_norms.FoldBatchNorms( g, is_training=True, freeze_batch_norm_delay=freeze_batch_norm_delay) folded_mul = g.get_operation_by_name(conv_name + '/mul_fold') self.assertEqual(folded_mul.type, 'Mul') self._AssertInputOpsAre(folded_mul, [ conv_name + '/correction_mult', self._BatchNormMultiplierName(conv_name, has_scaling, fused_batch_norm) ]) self._AssertOutputGoesToOps(folded_mul, g, [conv_name + '/Conv2D_Fold']) folded_conv = g.get_operation_by_name(conv_name + '/Conv2D_Fold') self.assertEqual(folded_conv.type, 'Conv2D') self._AssertInputOpsAre(folded_conv, [conv_name + '/mul_fold', inputs.op.name]) self._AssertOutputGoesToOps(folded_conv, g, [conv_name + '/post_conv_mul']) folded_add = g.get_operation_by_name(conv_name + '/add_fold') self.assertEqual(folded_add.type, 'Add') self._AssertInputOpsAre(folded_add, [ conv_name + '/correction_add', self._BathNormBiasName(conv_name, fused_batch_norm) ]) output_op_names = ['test/Add' if with_bypass else 'test/' + relu_op_name] self._AssertOutputGoesToOps(folded_add, g, output_op_names) if freeze_batch_norm_delay is not None: self._AssertMovingAveragesAreFrozen(g, name) for op in g.get_operations(): self.assertFalse('//' in op.name, 'Double slash in op %s' % op.name) def testFoldConv2d(self): self._RunTestOverParameters(self._TestFoldConv2d) def testMultipleLayerConv2d(self, relu=nn_ops.relu, relu_op_name='Relu', has_scaling=True, fused_batch_norm=False, freeze_batch_norm_delay=None, insert_identity_node=False): """Tests folding cases for a network with multiple layers. Args: relu: Callable that returns an Operation, a factory method for the Relu*. relu_op_name: String, name of the Relu* operation. has_scaling: Bool, when true the batch norm has scaling. fused_batch_norm: Bool, when true the batch norm is fused. 
freeze_batch_norm_delay: None or the number of steps after which training switches to using frozen mean and variance insert_identity_node: Bool, insert identity node between conv and batch norm """ g = ops.Graph() with g.as_default(): batch_size, height, width = 5, 128, 128 inputs = array_ops.zeros((batch_size, height, width, 3)) out_depth = 3 stride = 1 activation_fn = relu scope = 'topnet/testnet' with variable_scope.variable_scope(scope, [inputs]): layer1 = conv2d( inputs, out_depth, [5, 5], stride=stride, padding='SAME', weights_initializer=self._WeightInit(0.09), activation_fn=None, normalizer_fn=None, scope='testnet/layer1') # Add bn and relu with different scope layer1 = batch_norm( layer1, scale=has_scaling, fused=fused_batch_norm, scope='layer1') layer1 = activation_fn(layer1) layer2 = conv2d( layer1, 2 * out_depth, [5, 5], stride=stride, padding='SAME', weights_initializer=self._WeightInit(0.09), activation_fn=activation_fn, normalizer_fn=batch_norm, normalizer_params=self._BatchNormParams( scale=has_scaling, fused=fused_batch_norm), scope='testnet/layer2') # Add bn and relu with different scope layer2 = batch_norm( layer2, scale=has_scaling, fused=fused_batch_norm, scope='layer2') _ = activation_fn(layer2) scope = 'topnet/testnet/testnet/layer2' fold_batch_norms.FoldBatchNorms( g, is_training=True, freeze_batch_norm_delay=freeze_batch_norm_delay) folded_mul = g.get_operation_by_name(scope + '/mul_fold') self.assertEqual(folded_mul.type, 'Mul') self._AssertInputOpsAre(folded_mul, [ scope + '/correction_mult', self._BatchNormMultiplierName(scope, has_scaling, fused_batch_norm) ]) self._AssertOutputGoesToOps(folded_mul, g, [scope + '/Conv2D_Fold']) folded_conv = g.get_operation_by_name(scope + '/Conv2D_Fold') self.assertEqual(folded_conv.type, 'Conv2D') # Remove :0 at end of name for tensor prior to comparison self._AssertInputOpsAre(folded_conv, [scope + '/mul_fold', layer1.name[:-2]]) self._AssertOutputGoesToOps(folded_conv, g, [scope + '/post_conv_mul']) folded_add = g.get_operation_by_name(scope + '/add_fold') self.assertEqual(folded_add.type, 'Add') self._AssertInputOpsAre(folded_add, [ scope + '/correction_add', self._BathNormBiasName(scope, fused_batch_norm) ]) output_op_names = [scope + '/' + relu_op_name] self._AssertOutputGoesToOps(folded_add, g, output_op_names) if freeze_batch_norm_delay is not None: self._AssertMovingAveragesAreFrozen(g, scope) for op in g.get_operations(): self.assertFalse('//' in op.name, 'Double slash in op %s' % op.name) def _TestFoldConv2dUnknownShape(self, relu, relu_op_name, with_bypass, has_scaling, fused_batch_norm, freeze_batch_norm_delay, insert_identity_node=False): """Tests folding cases: inputs -> Conv2d with batch norm -> Relu*. Tests that folding works even with an input shape where some dimensions are not known (i.e. None). Args: relu: Callable that returns an Operation, a factory method for the Relu*. relu_op_name: String, name of the Relu* operation. with_bypass: Bool, when true there is an extra connection added from inputs to just before Relu*. has_scaling: Bool, when true the batch norm has scaling. fused_batch_norm: Bool, when true the batch norm is fused. 
freeze_batch_norm_delay: None or the number of steps after which training switches to using frozen mean and variance insert_identity_node: Bool, insert identity node between conv and batch norm """ g = ops.Graph() with g.as_default(): inputs = array_ops.placeholder(dtypes.float32, shape=(5, None, None, 3)) out_depth = 3 if with_bypass else 32 stride = 1 if with_bypass else 2 activation_fn = None if with_bypass else relu scope = 'test/test2' if with_bypass else 'test' node = conv2d( inputs, out_depth, [5, 5], stride=stride, padding='SAME', weights_initializer=self._WeightInit(0.09), activation_fn=activation_fn, normalizer_fn=batch_norm, normalizer_params=self._BatchNormParams( scale=has_scaling, fused=fused_batch_norm), scope=scope) if with_bypass: node = math_ops.add(inputs, node, name='test/Add') relu(node, name='test/' + relu_op_name) fold_batch_norms.FoldBatchNorms( g, is_training=True, freeze_batch_norm_delay=freeze_batch_norm_delay) folded_mul = g.get_operation_by_name(scope + '/mul_fold') self.assertEqual(folded_mul.type, 'Mul') self._AssertInputOpsAre(folded_mul, [ scope + '/correction_mult', self._BatchNormMultiplierName(scope, has_scaling, fused_batch_norm) ]) self._AssertOutputGoesToOps(folded_mul, g, [scope + '/Conv2D_Fold']) folded_conv = g.get_operation_by_name(scope + '/Conv2D_Fold') self.assertEqual(folded_conv.type, 'Conv2D') self._AssertInputOpsAre(folded_conv, [scope + '/mul_fold', inputs.op.name]) self._AssertOutputGoesToOps(folded_conv, g, [scope + '/post_conv_mul']) folded_add = g.get_operation_by_name(scope + '/add_fold') self.assertEqual(folded_add.type, 'Add') self._AssertInputOpsAre(folded_add, [ scope + '/correction_add', self._BathNormBiasName(scope, fused_batch_norm) ]) output_op_names = ['test/Add' if with_bypass else 'test/' + relu_op_name] self._AssertOutputGoesToOps(folded_add, g, output_op_names) if freeze_batch_norm_delay is not None: self._AssertMovingAveragesAreFrozen(g, scope) for op in g.get_operations(): self.assertFalse('//' in op.name, 'Double slash in op %s' % op.name) def testFoldConv2dUnknownShape(self): self._RunTestOverParameters(self._TestFoldConv2dUnknownShape) def _TestFoldFullyConnectedLayer( self, relu, relu_op_name, with_bypass, has_scaling, fused_batch_norm, freeze_batch_norm_delay, insert_identity_node): """Tests folding cases: inputs -> FC with batch norm -> Relu*. Args: relu: Callable that returns an Operation, a factory method for the Relu*. relu_op_name: String, name of the Relu* operation. with_bypass: Bool, when true there is an extra connection added from inputs to just before Relu*. has_scaling: Bool, when true the batch norm has scaling. fused_batch_norm: Bool, when true the batch norm is fused. 
freeze_batch_norm_delay: None or the number of steps after which training switches to using frozen mean and variance insert_identity_node: Bool, insert identity node between conv and batch norm """ g = ops.Graph() with g.as_default(): batch_size, depth = 5, 256 inputs = array_ops.zeros((batch_size, depth)) out_depth = 256 if with_bypass else 128 activation_fn = None if with_bypass else relu name = 'test/test2' if with_bypass else 'test' insert_identity_node = fused_batch_norm if insert_identity_node: with g.name_scope(name): node = fully_connected( inputs, out_depth, weights_initializer=self._WeightInit(0.03), activation_fn=None, normalizer_fn=None, biases_initializer=None) node = array_ops.identity(node, name='fc_out') node = batch_norm( node, center=True, scale=has_scaling, decay=1.0 - 0.003, fused=fused_batch_norm) if activation_fn is not None: node = activation_fn(node) fc_name = name + '/fully_connected' else: node = fully_connected( inputs, out_depth, weights_initializer=self._WeightInit(0.03), activation_fn=activation_fn, normalizer_fn=batch_norm, normalizer_params=self._BatchNormParams( scale=has_scaling, fused=fused_batch_norm), scope=name) fc_name = name if with_bypass: node = math_ops.add(inputs, node, name='test/Add') relu(node, name='test/' + relu_op_name) fold_batch_norms.FoldBatchNorms( g, is_training=True, freeze_batch_norm_delay=freeze_batch_norm_delay) folded_mul = g.get_operation_by_name(fc_name + '/mul_fold') self.assertEqual(folded_mul.type, 'Mul') self._AssertInputOpsAre(folded_mul, [ fc_name + '/correction_mult', self._BatchNormMultiplierName(fc_name, has_scaling, fused_batch_norm) ]) self._AssertOutputGoesToOps(folded_mul, g, [fc_name + '/MatMul_Fold']) folded_conv = g.get_operation_by_name(fc_name + '/MatMul_Fold') self.assertEqual(folded_conv.type, 'MatMul') self._AssertInputOpsAre(folded_conv, [fc_name + '/mul_fold', inputs.op.name]) self._AssertOutputGoesToOps(folded_conv, g, [fc_name + '/post_conv_mul']) folded_add = g.get_operation_by_name(fc_name + '/add_fold') self.assertEqual(folded_add.type, 'Add') self._AssertInputOpsAre(folded_add, [ fc_name + '/correction_add', self._BathNormBiasName(fc_name, fused_batch_norm) ]) output_op_names = ['test/Add' if with_bypass else 'test/' + relu_op_name] self._AssertOutputGoesToOps(folded_add, g, output_op_names) if freeze_batch_norm_delay is not None: self._AssertMovingAveragesAreFrozen(g, name) for op in g.get_operations(): self.assertFalse('//' in op.name, 'Double slash in op %s' % op.name) def testFoldFullyConnectedLayer(self): self._RunTestOverParameters(self._TestFoldFullyConnectedLayer) def _TestFoldDepthwiseConv2d(self, relu, relu_op_name, with_bypass, has_scaling, fused_batch_norm, freeze_batch_norm_delay, insert_identity_node): """Tests folding: inputs -> DepthwiseConv2d with batch norm -> Relu*. Args: relu: Callable that returns an Operation, a factory method for the Relu*. relu_op_name: String, name of the Relu* operation. with_bypass: Bool, when true there is an extra connection added from inputs to just before Relu*. has_scaling: Bool, when true the batch norm has scaling. fused_batch_norm: Bool, when true the batch norm is fused. 
freeze_batch_norm_delay: None or the number of steps after which training insert_identity_node: Bool, insert identity node between conv and batch norm switches to using frozen mean and variance """ g = ops.Graph() with g.as_default(): batch_size, height, width = 5, 128, 128 inputs = array_ops.zeros((batch_size, height, width, 3)) stride = 1 if with_bypass else 2 activation_fn = None if with_bypass else relu name = 'test/test2' if with_bypass else 'test' if insert_identity_node: with g.name_scope(name): node = separable_conv2d( inputs, None, [5, 5], stride=stride, depth_multiplier=1.0, padding='SAME', weights_initializer=self._WeightInit(0.09), activation_fn=None, normalizer_fn=None, biases_initializer=None) node = array_ops.identity(node, name='sep_conv_out') node = batch_norm( node, center=True, scale=has_scaling, decay=1.0 - 0.003, fused=fused_batch_norm) if activation_fn is not None: node = activation_fn(node) sep_conv_name = name + '/SeparableConv2d' else: node = separable_conv2d( inputs, None, [5, 5], stride=stride, depth_multiplier=1.0, padding='SAME', weights_initializer=self._WeightInit(0.09), activation_fn=activation_fn, normalizer_fn=batch_norm, normalizer_params=self._BatchNormParams( scale=has_scaling, fused=fused_batch_norm), scope=name) sep_conv_name = name if with_bypass: node = math_ops.add(inputs, node, name='test/Add') relu(node, name='test/' + relu_op_name) fold_batch_norms.FoldBatchNorms( g, is_training=True, freeze_batch_norm_delay=freeze_batch_norm_delay) folded_mul = g.get_operation_by_name(sep_conv_name + '/mul_fold') self.assertEqual(folded_mul.type, 'Mul') if fused_batch_norm: scale_reshape_op_name = sep_conv_name + '/BatchNorm_Fold/scale_reshape' else: scale_reshape_op_name = sep_conv_name + '/scale_reshape' self._AssertInputOpsAre( folded_mul, [sep_conv_name + '/correction_mult', scale_reshape_op_name]) self._AssertOutputGoesToOps(folded_mul, g, [sep_conv_name + '/depthwise_Fold']) scale_reshape = g.get_operation_by_name(scale_reshape_op_name) self.assertEqual(scale_reshape.type, 'Reshape') self._AssertInputOpsAre(scale_reshape, [ self._BatchNormMultiplierName(sep_conv_name, has_scaling, fused_batch_norm), scale_reshape_op_name + '/shape' ]) self._AssertOutputGoesToOps(scale_reshape, g, [sep_conv_name + '/mul_fold']) folded_conv = g.get_operation_by_name(sep_conv_name + '/depthwise_Fold') self.assertEqual(folded_conv.type, 'DepthwiseConv2dNative') self._AssertInputOpsAre(folded_conv, [sep_conv_name + '/mul_fold', inputs.op.name]) self._AssertOutputGoesToOps(folded_conv, g, [sep_conv_name + '/post_conv_mul']) folded_add = g.get_operation_by_name(sep_conv_name + '/add_fold') self.assertEqual(folded_add.type, 'Add') self._AssertInputOpsAre(folded_add, [ sep_conv_name + '/correction_add', self._BathNormBiasName(sep_conv_name, fused_batch_norm) ]) output_op_names = ['test/Add' if with_bypass else 'test/' + relu_op_name] self._AssertOutputGoesToOps(folded_add, g, output_op_names) if freeze_batch_norm_delay is not None: self._AssertMovingAveragesAreFrozen(g, name) for op in g.get_operations(): self.assertFalse('//' in op.name, 'Double slash in op %s' % op.name) def testFoldDepthwiseConv2d(self): self._RunTestOverParameters(self._TestFoldDepthwiseConv2d) def _TestFoldAtrousConv2d(self, relu, relu_op_name, with_bypass, has_scaling, fused_batch_norm, freeze_batch_norm_delay, insert_identity_node): """Tests folding: inputs -> AtrousConv2d with batch norm -> Relu*. Args: relu: Callable that returns an Operation, a factory method for the Relu*. 
relu_op_name: String, name of the Relu* operation. with_bypass: Bool, when true there is an extra connection added from inputs to just before Relu*. has_scaling: Bool, when true the batch norm has scaling. fused_batch_norm: Bool, when true the batch norm is fused. freeze_batch_norm_delay: None or the number of steps after which training switches to using frozen mean and variance insert_identity_node: Bool, insert identity node between conv and batch norm """ g = ops.Graph() with g.as_default(): batch_size, height, width = 5, 128, 128 inputs = array_ops.zeros((batch_size, height, width, 3)) dilation_rate = 2 activation_fn = None if with_bypass else relu name = 'test/test2' if with_bypass else 'test' if insert_identity_node: with g.name_scope(name): node = separable_conv2d( inputs, None, [3, 3], rate=dilation_rate, depth_multiplier=1.0, padding='SAME', weights_initializer=self._WeightInit(0.09), activation_fn=None, normalizer_fn=None, biases_initializer=None) node = array_ops.identity(node, name='sep_conv_out') node = batch_norm( node, center=True, scale=has_scaling, decay=1.0 - 0.003, fused=fused_batch_norm) if activation_fn is not None: node = activation_fn(node) sep_conv_name = name + '/SeparableConv2d' else: node = separable_conv2d( inputs, None, [3, 3], rate=dilation_rate, depth_multiplier=1.0, padding='SAME', weights_initializer=self._WeightInit(0.09), activation_fn=activation_fn, normalizer_fn=batch_norm, normalizer_params=self._BatchNormParams( scale=has_scaling, fused=fused_batch_norm), scope=name) sep_conv_name = name if with_bypass: node = math_ops.add(inputs, node, name='test/Add') relu(node, name='test/' + relu_op_name) fold_batch_norms.FoldBatchNorms( g, is_training=True, freeze_batch_norm_delay=freeze_batch_norm_delay) folded_mul = g.get_operation_by_name(sep_conv_name + '/mul_fold') self.assertEqual(folded_mul.type, 'Mul') if fused_batch_norm: scale_reshape_op_name = sep_conv_name + '/BatchNorm_Fold/scale_reshape' else: scale_reshape_op_name = sep_conv_name + '/scale_reshape' self._AssertInputOpsAre( folded_mul, [sep_conv_name + '/correction_mult', scale_reshape_op_name]) self._AssertOutputGoesToOps(folded_mul, g, [sep_conv_name + '/depthwise_Fold']) scale_reshape = g.get_operation_by_name(scale_reshape_op_name) self.assertEqual(scale_reshape.type, 'Reshape') self._AssertInputOpsAre(scale_reshape, [ self._BatchNormMultiplierName(sep_conv_name, has_scaling, fused_batch_norm), scale_reshape_op_name + '/shape' ]) self._AssertOutputGoesToOps(scale_reshape, g, [sep_conv_name + '/mul_fold']) folded_conv = g.get_operation_by_name(sep_conv_name + '/depthwise_Fold') self.assertEqual(folded_conv.type, 'DepthwiseConv2dNative') self._AssertInputOpsAre(folded_conv, [ sep_conv_name + '/mul_fold', sep_conv_name + '/depthwise/SpaceToBatchND' ]) if fused_batch_norm: self._AssertOutputGoesToOps(folded_conv, g, [sep_conv_name + '/BatchToSpaceND_Fold']) else: self._AssertOutputGoesToOps( folded_conv, g, [sep_conv_name + '/depthwise/BatchToSpaceND_Fold']) folded_add = g.get_operation_by_name(sep_conv_name + '/add_fold') self.assertEqual(folded_add.type, 'Add') self._AssertInputOpsAre(folded_add, [ sep_conv_name + '/correction_add', self._BathNormBiasName(sep_conv_name, fused_batch_norm) ]) output_op_names = ['test/Add' if with_bypass else 'test/' + relu_op_name] self._AssertOutputGoesToOps(folded_add, g, output_op_names) if freeze_batch_norm_delay is not None: self._AssertMovingAveragesAreFrozen(g, name) for op in g.get_operations(): self.assertFalse('//' in op.name, 'Double slash in op %s' % 
op.name) def testFoldAtrousConv2d(self): self._RunTestOverParameters(self._TestFoldAtrousConv2d) def _TestCompareFoldAndUnfolded(self, relu, relu_op_name, with_bypass, has_scaling, fused_batch_norm, freeze_batch_norm_delay, insert_identity_node=False): """Tests that running folded and unfolded BN returns the same results. Args: relu: Callable that returns an Operation, a factory method for the Relu*. relu_op_name: String, name of the Relu* operation. with_bypass: Bool, when true there is an extra connection added from inputs to just before Relu*. has_scaling: Bool, when true the batch norm has scaling. fused_batch_norm: Bool, when true the batch norm is fused. freeze_batch_norm_delay: None or the number of steps after which training switches to using frozen mean and variance insert_identity_node: Bool, insert identity node between conv and batch norm """ random_seed.set_random_seed(1234) unfolded_g = ops.Graph() with unfolded_g.as_default(): batch_size, height, width = 5, 128, 128 inputs = random_ops.random_uniform( (batch_size, height, width, 3), dtype=dtypes.float32, seed=1234) out_depth = 3 if with_bypass else 32 stride = 1 if with_bypass else 2 activation_fn = None if with_bypass else relu scope = 'test/test2' if with_bypass else 'test' node = conv2d( inputs, out_depth, [5, 5], stride=stride, padding='SAME', weights_initializer=self._WeightInit(0.09), activation_fn=activation_fn, normalizer_fn=batch_norm, normalizer_params=self._BatchNormParams( scale=has_scaling, fused=fused_batch_norm), scope=scope) if with_bypass: node = math_ops.add(inputs, node, name='test/Add') relu_node = relu(node, name='test/' + relu_op_name) folded_g = self._CopyGraph(unfolded_g) with folded_g.as_default(): fold_batch_norms.FoldBatchNorms( folded_g, is_training=True, freeze_batch_norm_delay=freeze_batch_norm_delay) with session.Session(graph=unfolded_g) as sess: sess.run(variables.global_variables_initializer()) grad_node = gradients.gradients(relu_node, inputs) results = sess.run([relu_node, grad_node]) unfolded_forward, unfolded_backward = results[0], results[1] with session.Session(graph=folded_g) as sess: sess.run(variables.global_variables_initializer()) relu_node = folded_g.get_tensor_by_name(relu_node.name) inputs = folded_g.get_tensor_by_name(inputs.name) grad_node = gradients.gradients(relu_node, inputs) results = sess.run([relu_node, grad_node]) folded_forward, folded_backward = results[0], results[1] # Check that the folded and unfolded results match. self.assertAllClose(unfolded_forward, folded_forward, atol=1e-3) self.assertAllClose(unfolded_backward, folded_backward, atol=1e-3) def testCompareFoldAndUnfolded(self): self._RunTestOverParameters(self._TestCompareFoldAndUnfolded) def _BatchNormParams(self, scale=True, fused=False): return { 'center': True, 'scale': scale, 'decay': 1.0 - 0.003, 'fused': fused } def _BatchNormMultiplierName(self, scope, has_scaling, fused): if has_scaling: if fused: return scope + '/BatchNorm_Fold/mul' return scope + '/BatchNorm/batchnorm_1/mul' return scope + '/BatchNorm/batchnorm_1/Rsqrt' def _BathNormBiasName(self, scope, fused): if fused: return scope + '/BatchNorm_Fold/bias' return scope + '/BatchNorm/batchnorm_1/sub' def _WeightInit(self, stddev): """Returns a truncated normal variable initializer. Function is defined purely to shorten the name so that it stops wrapping. Args: stddev: Standard deviation of normal variable. Returns: An initializer that initializes with a truncated normal variable. 
""" return init_ops.truncated_normal_initializer(stddev=stddev, seed=1234) def _AssertInputOpsAre(self, op, in_op_names): """Asserts that all inputs to op come from in_op_names (disregarding order). Args: op: Operation to check inputs for. in_op_names: List of strings, operations where all op's inputs should come from. """ expected_inputs = [in_op_name + ':0' for in_op_name in in_op_names] self.assertItemsEqual([t.name for t in op.inputs], expected_inputs) def _AssertOutputGoesToOps(self, op, graph, out_op_names): """Asserts that outputs from op go to out_op_names (and perhaps others). Args: op: Operation to check outputs for. graph: Graph where output operations are located. out_op_names: List of strings, operations where op's outputs should go. """ for out_op_name in out_op_names: out_op = graph.get_operation_by_name(out_op_name) self.assertIn(op.outputs[0].name, [str(t.name) for t in out_op.inputs]) def _AssertMovingAveragesAreFrozen(self, graph, scope): """Asserts to check if moving mean and variance are frozen. Args: graph: Graph where the operations are located. scope: Scope of batch norm op """ moving_average_mult = graph.get_operation_by_name( scope + '/BatchNorm/AssignMovingAvg/mul') self.assertTrue( moving_average_mult.inputs[1].name.find('freeze_moving_mean/Merge') > 0) moving_var_mult = graph.get_operation_by_name( scope + '/BatchNorm/AssignMovingAvg_1/mul') self.assertTrue( moving_var_mult.inputs[1].name.find('freeze_moving_var/Merge') > 0) def _CopyGraph(self, graph): """Return a copy of graph.""" meta_graph = saver_lib.export_meta_graph( graph=graph, collection_list=graph.get_all_collection_keys()) graph_copy = ops.Graph() with graph_copy.as_default(): _ = saver_lib.import_meta_graph(meta_graph) return graph_copy if __name__ == '__main__': googletest.main()
apache-2.0
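The tests above check graph rewrites that fold a (frozen) batch norm into the preceding conv or FC weights, producing the '/mul_fold' and '/add_fold' ops that the assertions look for. Numerically, the identity the rewrite must preserve is the standard folding below; a NumPy sketch with illustrative eps and shapes:

# NumPy sketch of the batch-norm folding identity exercised above:
# BN(conv(x, w)) == conv(x, w_fold) + b_fold  once mean/variance are frozen.
import numpy as np

eps = 1e-3
w = np.random.randn(5, 5, 3, 32)     # conv kernel, HWIO layout
gamma = np.random.randn(32)          # BN scale
beta = np.random.randn(32)           # BN shift
mean = np.random.randn(32)           # frozen moving mean
var = np.random.rand(32)             # frozen moving variance

scale = gamma / np.sqrt(var + eps)   # the '/mul_fold' multiplier
w_fold = w * scale                   # folded kernel (broadcast over channels)
b_fold = beta - mean * scale         # the '/add_fold' bias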
xujun10110/weevely3
modules/file/cp.py
16
1559
from core.vectors import PhpCode, ShellCmd, ModuleExec, Os
from core.module import Module
from core import modules


class Cp(Module):

    """Copy single file."""

    aliases = [ 'cp', 'copy' ]

    def init(self):

        self.register_info(
            {
                'author': [
                    'Emilio Pinna'
                ],
                'license': 'GPLv3'
            }
        )

        self.register_vectors(
            [
            PhpCode(
              "(@copy('${srcpath}', '${dstpath}')&&print(1))||print(0);",
              name = 'php_copy'
            ),
            PhpCode(
              "(@file_put_contents('${dstpath}', file_get_contents('${srcpath}'))&&print(1))||print(0);",
              name = 'php_file_contents'
            ),
            ShellCmd(
              "cp '${srcpath}' '${dstpath}' && echo 1 || echo 0",
              name = 'sh_cp',
              target = Os.NIX
            ),
            ]
        )

        self.register_arguments([
          { 'name' : 'srcpath', 'help' : 'Remote source file path' },
          { 'name' : 'dstpath', 'help' : 'Remote destination file path' },
          { 'name' : '-vector', 'choices' : self.vectors.get_names() }
        ])

    def run(self):

        vector_name, result = self.vectors.find_first_result(
            names = [ self.args.get('vector') ],
            format_args = self.args,
            condition = lambda result: True if result == '1' else False
        )

        if vector_name and result:
            return True

        return False
gpl-3.0
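run() above relies on find_first_result() trying each registered vector in order until one prints '1' (each PHP/shell payload is written to emit 1 on success, 0 on failure). Detached from weevely's classes, that control flow is the usual first-success fallback; a sketch with stand-in callables:

# Sketch of the first-success fallback that find_first_result() performs
# over the three copy vectors above; the lambdas are stand-ins, not weevely API.
def find_first_result(vectors, condition):
    for name, run in vectors:
        result = run()
        if condition(result):
            return name, result
    return None, None

vectors = [('php_copy', lambda: '0'),           # e.g. copy() disabled
           ('php_file_contents', lambda: '1'),  # fallback succeeds
           ('sh_cp', lambda: '0')]
print(find_first_result(vectors, lambda r: r == '1'))  # ('php_file_contents', '1')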
user-none/calibre
src/calibre/library/__init__.py
14
2490
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
''' Code to manage ebook library'''


def db(path=None, read_only=False):
    from calibre.db.legacy import LibraryDatabase
    from calibre.utils.config import prefs
    from calibre.utils.filenames import expanduser
    return LibraryDatabase(expanduser(path) if path else prefs['library_path'],
                           read_only=read_only)


def generate_test_db(library_path,  # {{{
        num_of_records=20000,
        num_of_authors=6000,
        num_of_tags=10000,
        tag_length=7,
        author_length=7,
        title_length=10,
        max_authors=10,
        max_tags=10
        ):
    import random, string, os, sys, time
    from calibre.constants import preferred_encoding

    if not os.path.exists(library_path):
        os.makedirs(library_path)

    letters = string.letters.decode(preferred_encoding)

    def randstr(length):
        return ''.join(random.choice(letters) for i in xrange(length))

    all_tags = [randstr(tag_length) for j in xrange(num_of_tags)]
    print 'Generated', num_of_tags, 'tags'
    all_authors = [randstr(author_length) for j in xrange(num_of_authors)]
    print 'Generated', num_of_authors, 'authors'
    all_titles = [randstr(title_length) for j in xrange(num_of_records)]
    print 'Generated', num_of_records, 'titles'

    testdb = db(library_path)

    print 'Creating', num_of_records, 'records...'

    start = time.time()

    for i, title in enumerate(all_titles):
        print i+1,
        sys.stdout.flush()
        authors = random.randint(1, max_authors)
        authors = [random.choice(all_authors) for i in xrange(authors)]
        tags = random.randint(0, max_tags)
        tags = [random.choice(all_tags) for i in xrange(tags)]
        from calibre.ebooks.metadata.book.base import Metadata
        mi = Metadata(title, authors)
        mi.tags = tags
        testdb.import_book(mi, [])

    t = time.time() - start
    print '\nGenerated', num_of_records, 'records in:', t, 'seconds'
    print 'Time per record:', t/float(num_of_records)
# }}}


def current_library_path():
    from calibre.utils.config import prefs
    path = prefs['library_path']
    if path:
        path = path.replace('\\', '/')
        while path.endswith('/'):
            path = path[:-1]
        return path


def current_library_name():
    import posixpath
    path = current_library_path()
    if path:
        return posixpath.basename(path)
gpl-3.0
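Typical use of the helpers above from within a calibre environment; the library path below is hypothetical:

# Usage sketch for the helpers above (requires a calibre install; the
# /tmp path is hypothetical).
from calibre.library import db, generate_test_db

testdb = db('/tmp/test_library', read_only=True)
# or populate a synthetic library for benchmarking import speed:
# generate_test_db('/tmp/test_library', num_of_records=100)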
Abhinav117/pymtl
pisa/pisa_inst_slt_test.py
4
5401
#=========================================================================
# pisa_slt_test.py
#=========================================================================

import pytest
import random

import pisa_encoding

from pymtl import Bits
from PisaSim import PisaSim

from pisa_inst_test_utils import *

#-------------------------------------------------------------------------
# gen_basic_test
#-------------------------------------------------------------------------

def gen_basic_test():
  return """
    mfc0 r1, mngr2proc < 5
    mfc0 r2, mngr2proc < 4
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    slt r3, r1, r2
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    mtc0 r3, proc2mngr > 0
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
  """

#-------------------------------------------------------------------------
# gen_dest_byp_test
#-------------------------------------------------------------------------

def gen_dest_byp_test():
  return [
    gen_rr_dest_byp_test( 5, "slt", 1, 0, 0 ),
    gen_rr_dest_byp_test( 4, "slt", 1, 1, 0 ),
    gen_rr_dest_byp_test( 3, "slt", 0, 1, 1 ),
    gen_rr_dest_byp_test( 2, "slt", 2, 1, 0 ),
    gen_rr_dest_byp_test( 1, "slt", 2, 2, 0 ),
    gen_rr_dest_byp_test( 0, "slt", 1, 2, 1 ),
  ]

#-------------------------------------------------------------------------
# gen_src0_byp_test
#-------------------------------------------------------------------------

def gen_src0_byp_test():
  return [
    gen_rr_src0_byp_test( 5, "slt", 3, 2, 0 ),
    gen_rr_src0_byp_test( 4, "slt", 3, 3, 0 ),
    gen_rr_src0_byp_test( 3, "slt", 2, 3, 1 ),
    gen_rr_src0_byp_test( 2, "slt", 4, 3, 0 ),
    gen_rr_src0_byp_test( 1, "slt", 4, 4, 0 ),
    gen_rr_src0_byp_test( 0, "slt", 3, 4, 1 ),
  ]

#-------------------------------------------------------------------------
# gen_src1_byp_test
#-------------------------------------------------------------------------

def gen_src1_byp_test():
  return [
    gen_rr_src1_byp_test( 5, "slt", 5, 4, 0 ),
    gen_rr_src1_byp_test( 4, "slt", 5, 5, 0 ),
    gen_rr_src1_byp_test( 3, "slt", 4, 5, 1 ),
    gen_rr_src1_byp_test( 2, "slt", 6, 5, 0 ),
    gen_rr_src1_byp_test( 1, "slt", 6, 6, 0 ),
    gen_rr_src1_byp_test( 0, "slt", 5, 6, 1 ),
  ]

#-------------------------------------------------------------------------
# gen_srcs_byp_test
#-------------------------------------------------------------------------

def gen_srcs_byp_test():
  return [
    gen_rr_srcs_byp_test( 5, "slt", 7, 6, 0 ),
    gen_rr_srcs_byp_test( 4, "slt", 7, 7, 0 ),
    gen_rr_srcs_byp_test( 3, "slt", 6, 7, 1 ),
    gen_rr_srcs_byp_test( 2, "slt", 8, 7, 0 ),
    gen_rr_srcs_byp_test( 1, "slt", 8, 8, 0 ),
    gen_rr_srcs_byp_test( 0, "slt", 7, 8, 1 ),
  ]

#-------------------------------------------------------------------------
# gen_srcs_dest_test
#-------------------------------------------------------------------------

def gen_srcs_dest_test():
  return [
    gen_rr_src0_eq_dest_test( "slt", 9, 8, 0 ),
    gen_rr_src1_eq_dest_test( "slt", 8, 9, 1 ),
    gen_rr_src0_eq_src1_test( "slt", 10, 0 ),
    gen_rr_srcs_eq_dest_test( "slt", 10, 0 ),
  ]

#-------------------------------------------------------------------------
# gen_value_test
#-------------------------------------------------------------------------

def gen_value_test():
  return [
    gen_rr_value_test( "slt", 0x00000000, 0x00000000, 0 ),
    gen_rr_value_test( "slt", 0x00000001, 0x00000001, 0 ),
    gen_rr_value_test( "slt", 0x00000003, 0x00000007, 1 ),
    gen_rr_value_test( "slt", 0x00000007, 0x00000003, 0 ),
    gen_rr_value_test( "slt", 0x00000000, 0xffff8000, 0 ),
    gen_rr_value_test( "slt", 0x80000000, 0x00000000, 1 ),
    gen_rr_value_test( "slt", 0x80000000, 0xffff8000, 1 ),
    gen_rr_value_test( "slt", 0x00000000, 0x00007fff, 1 ),
    gen_rr_value_test( "slt", 0x7fffffff, 0x00000000, 0 ),
    gen_rr_value_test( "slt", 0x7fffffff, 0x00007fff, 0 ),
    gen_rr_value_test( "slt", 0x80000000, 0x00007fff, 1 ),
    gen_rr_value_test( "slt", 0x7fffffff, 0xffff8000, 0 ),
    gen_rr_value_test( "slt", 0x00000000, 0xffffffff, 0 ),
    gen_rr_value_test( "slt", 0xffffffff, 0x00000001, 1 ),
    gen_rr_value_test( "slt", 0xffffffff, 0xffffffff, 0 ),
  ]

#-------------------------------------------------------------------------
# gen_random_test
#-------------------------------------------------------------------------

def gen_random_test():
  asm_code = []
  for i in xrange(100):
    src0 = Bits( 32, random.randint(0,0xffffffff) )
    src1 = Bits( 32, random.randint(0,0xffffffff) )
    dest = Bits( 32, src0.int() < src1.int() )
    asm_code.append( gen_rr_value_test( "slt", src0.uint(), src1.uint(), dest.uint() ) )
  return asm_code

#-------------------------------------------------------------------------
# test_basic
#-------------------------------------------------------------------------

@pytest.mark.parametrize( "name,test", [
  asm_test( gen_basic_test     ),
  asm_test( gen_dest_byp_test  ),
  asm_test( gen_src0_byp_test  ),
  asm_test( gen_src1_byp_test  ),
  asm_test( gen_srcs_byp_test  ),
  asm_test( gen_srcs_dest_test ),
  asm_test( gen_value_test     ),
  asm_test( gen_random_test    ),
])
def test( name, test ):
  sim = PisaSim( trace_en=True )
  sim.load( pisa_encoding.assemble( test() ) )
  sim.run()
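
#-------------------------------------------------------------------------
# standalone usage sketch
#-------------------------------------------------------------------------
# Editor's sketch, not part of the original test suite: running a single
# generator by hand, outside of pytest, can help when debugging one case.
# It assumes PisaSim and pisa_encoding behave exactly as they are used in
# test() above; no new names are introduced.

if __name__ == "__main__":
  sim = PisaSim( trace_en=True )
  sim.load( pisa_encoding.assemble( gen_basic_test() ) )
  sim.run()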
bsd-3-clause
Redsift/offlineimap
test/OLItest/TestRunner.py
10
10132
# Copyright (C) 2012- Sebastian Spaeth & contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

import imaplib
import unittest
import logging
import os
import re
import sys
import shutil
import subprocess
import tempfile
import random
random.seed()

from offlineimap.CustomConfig import CustomConfigParser
from . import default_conf


class OLITestLib():
    cred_file = None
    testdir = None
    """Absolute path of the current temporary test directory"""
    cmd = None
    """command that will be executed to invoke offlineimap"""

    def __init__(self, cred_file=None, cmd='offlineimap'):
        """
        :param cred_file: file of the configuration snippet for
            authenticating against the test IMAP server(s).
        :param cmd: command that will be executed to invoke offlineimap"""
        OLITestLib.cred_file = cred_file
        if not os.path.isfile(cred_file):
            raise UserWarning("Please copy 'credentials.conf.sample' to '%s' "
                "and set your credentials there." % cred_file)
        OLITestLib.cmd = cmd

    @classmethod
    def create_test_dir(cls, suffix=''):
        """Creates a test directory and places OLI config there

        Note that this is a class method. There can only be one test
        directory at a time. OLITestLib is not suited for running
        several tests in parallel. The user is responsible for
        cleaning that up herself."""
        assert cls.cred_file != None
        # creating temporary dir for testing in same dir as credentials.conf
        cls.testdir = os.path.abspath(
            tempfile.mkdtemp(prefix='tmp_%s_' % suffix,
                             dir=os.path.dirname(cls.cred_file)))
        cls.write_config_file()
        return cls.testdir

    @classmethod
    def get_default_config(cls):
        """Creates a default ConfigParser file and returns it

        The returned config can be manipulated and then saved with
        write_config_file()"""
        #TODO, only do first time and cache then for subsequent calls?
        assert cls.cred_file != None
        assert cls.testdir != None
        config = CustomConfigParser()
        config.readfp(default_conf)
        default_conf.seek(0) # rewind config_file to start
        config.read(cls.cred_file)
        config.set("general", "metadata", cls.testdir)
        return config

    @classmethod
    def write_config_file(cls, config=None):
        """Creates a OLI configuration file

        It is created in testdir (so create_test_dir has to be called
        earlier) using the credentials information given (so they had to
        be set earlier). Failure to do either of them will raise an
        AssertionException. If config is None, a default one will be used
        via get_default_config, otherwise it needs to be a config object
        derived from that."""
        if config is None:
            config = cls.get_default_config()
        localfolders = os.path.join(cls.testdir, 'mail')
        config.set("Repository Maildir", "localfolders", localfolders)
        with open(os.path.join(cls.testdir, 'offlineimap.conf'), "wt") as f:
            config.write(f)

    @classmethod
    def delete_test_dir(cls):
        """Deletes the current test directory

        The user is responsible for cleaning that up herself."""
        if os.path.isdir(cls.testdir):
            shutil.rmtree(cls.testdir)

    @classmethod
    def run_OLI(cls):
        """Runs OfflineImap

        :returns: (rescode, stdout (as unicode))
        """
        try:
            output = subprocess.check_output(
                [cls.cmd,
                 "-c%s" % os.path.join(cls.testdir, 'offlineimap.conf')],
                shell=False)
        except subprocess.CalledProcessError as e:
            return (e.returncode, e.output.decode('utf-8'))
        return (0, output.decode('utf-8'))

    @classmethod
    def delete_remote_testfolders(cls, reponame=None):
        """Delete all INBOX.OLITEST* folders on the remote IMAP repository

        reponame: All on `reponame` or all IMAP-type repositories if None"""
        config = cls.get_default_config()
        if reponame:
            sections = ['Repository {0}'.format(reponame)]
        else:
            sections = [r for r in config.sections()
                        if r.startswith('Repository')]
            sections = filter(lambda s:
                                  config.get(s, 'Type').lower() == 'imap',
                              sections)
        for sec in sections:
            # Connect to each IMAP repo and delete all folders
            # matching the folderfilter setting. We only allow basic
            # settings and no fancy password getting here...
            # 1) connect and get dir listing
            host = config.get(sec, 'remotehost')
            user = config.get(sec, 'remoteuser')
            passwd = config.get(sec, 'remotepass')
            imapobj = imaplib.IMAP4(host)
            imapobj.login(user, passwd)
            res_t, data = imapobj.list()
            assert res_t == 'OK'
            dirs = []
            for d in data:
                if d == '':
                    continue
                if isinstance(d, tuple): # literal (unquoted)
                    folder = b'"%s"' % d[1].replace('"', '\\"')
                else:
                    m = re.search(br'''
                        [ ]                # space
                        (?P<dir>
                         (?P<quote>"?)     # starting quote
                         ([^"]|\\")*       # a non-quote or a backslashed quote
                         (?P=quote))$      # ending quote
                        ''', d, flags=re.VERBOSE)
                    folder = bytearray(m.group('dir'))
                    if not m.group('quote'):
                        folder = '"%s"' % folder
                #folder = folder.replace(br'\"', b'"') # remove quoting
                dirs.append(folder)
            # 2) filter out those not starting with INBOX.OLItest and del...
            dirs = [d for d in dirs if d.startswith(b'"INBOX.OLItest')
                        or d.startswith(b'"INBOX/OLItest')]
            for folder in dirs:
                res_t, data = imapobj.delete(folder)
                assert res_t == 'OK', "Folder deletion of {0} failed with error"\
                    ":\n{1} {2}".format(folder.decode('utf-8'), res_t, data)
            imapobj.logout()

    @classmethod
    def create_maildir(cls, folder):
        """Create empty maildir 'folder' in our test maildir

        Does not fail if it already exists"""
        assert cls.testdir != None
        maildir = os.path.join(cls.testdir, 'mail', folder)
        for subdir in ('', 'tmp', 'cur', 'new'):
            try:
                os.makedirs(os.path.join(maildir, subdir))
            except OSError as e:
                if e.errno != 17: # 'already exists' is ok.
                    raise

    @classmethod
    def delete_maildir(cls, folder):
        """Delete maildir 'folder' in our test maildir

        Does not fail if not existing"""
        assert cls.testdir != None
        maildir = os.path.join(cls.testdir, 'mail', folder)
        shutil.rmtree(maildir, ignore_errors=True)

    @classmethod
    def create_mail(cls, folder, mailfile=None, content=None):
        """Create a mail in maildir 'folder'/new

        Use default mailfilename if not given.
        Use some default content if not given"""
        assert cls.testdir != None
        # honor an explicitly given filename; otherwise pick a unique
        # random one (the original always generated one, contradicting
        # the docstring)
        if mailfile is None:
            while True: # Loop till we found a unique filename
                mailfile = '{0}:2,'.format(random.randint(0, 999999999))
                mailfilepath = os.path.join(cls.testdir, 'mail', folder,
                                            'new', mailfile)
                if not os.path.isfile(mailfilepath):
                    break
        else:
            mailfilepath = os.path.join(cls.testdir, 'mail', folder,
                                        'new', mailfile)
        if content is None:
            content = b'''From: test <test@offlineimap.org>
Subject: Boo
Date: 1 Jan 1980
To: test@offlineimap.org

Content here.'''
        with open(mailfilepath, "wb") as mailf:
            mailf.write(content)

    @classmethod
    def count_maildir_mails(cls, folder):
        """Returns the number of mails in maildir 'folder'

        Counting only those in cur&new (ignoring tmp)."""
        assert cls.testdir != None
        maildir = os.path.join(cls.testdir, 'mail', folder)
        boxes, mails = 0, 0
        for dirpath, dirs, files in os.walk(maildir, False):
            if set(dirs) == set(['cur', 'new', 'tmp']):
                # New maildir folder
                boxes += 1
                #raise RuntimeError("%s is not Maildir" % maildir)
            if dirpath.endswith(('/cur', '/new')):
                mails += len(files)
        return boxes, mails

    # find UID in a maildir filename
    re_uidmatch = re.compile(r',U=(\d+)')

    @classmethod
    def get_maildir_uids(cls, folder):
        """Returns a list of maildir mail uids, 'None' if no valid uid"""
        assert cls.testdir != None
        mailfilepath = os.path.join(cls.testdir, 'mail', folder)
        assert os.path.isdir(mailfilepath)
        ret = []
        for dirpath, dirs, files in os.walk(mailfilepath):
            if not dirpath.endswith((os.path.sep + 'new',
                                     os.path.sep + 'cur')):
                continue # only /new /cur are interesting
            for file in files:
                m = cls.re_uidmatch.search(file)
                uid = m.group(1) if m else None
                ret.append(uid)
        return ret
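
#-------------------------------------------------------------------------
# usage sketch
#-------------------------------------------------------------------------
# Editor's sketch, not part of the original module: the intended call
# sequence, pieced together from the docstrings above. The credentials
# path and folder name here are hypothetical placeholders.

if __name__ == "__main__":
    OLITestLib(cred_file='./credentials.conf', cmd='offlineimap')
    OLITestLib.create_test_dir(suffix='demo')
    OLITestLib.create_maildir('INBOX.OLItest_demo')
    OLITestLib.create_mail('INBOX.OLItest_demo')
    rescode, output = OLITestLib.run_OLI()
    print(output)
    OLITestLib.delete_test_dir()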
gpl-2.0
amunategui/phraug
unshuffle.py
5
1774
""" Unshuffle previously shuffled file unshuffle.py input_file.csv output_file.csv <max. lines in memory> <random seed> """ import sys import random input_file = sys.argv[1] output_file = sys.argv[2] try: lines_in_memory = int( sys.argv[3] ) except IndexError: lines_in_memory = 100000 print "caching %s lines at a time..." % ( lines_in_memory ) try: random_seed = sys.argv[4] random.seed( random_seed ) print "random seed: %s" % ( random_seed ) except IndexError: print "need a seed..." sys.exit( 1 ) # first count print "counting lines..." f = open( input_file ) count = 0 for line in f: count += 1 if count % 100000 == 0: print count print count # then shuffle print "(un)shuffling..." o_f = open( output_file, 'wb' ) order = range( count ) random.shuffle( order ) # un-shuffle order_dict = { shuf_i: orig_i for shuf_i, orig_i in enumerate( order ) } # sort by original key asc, will get shuffled keys in the right order to unshuffle order = sorted( order_dict, key = order_dict.get ) epoch = 0 while order: current_lines = {} current_lines_count = 0 current_chunk = order[:lines_in_memory] current_chunk_dict = { x: 1 for x in current_chunk } # faster "in" current_chunk_length = len( current_chunk ) order = order[lines_in_memory:] f.seek( 0 ) count = 0 for line in f: if count in current_chunk_dict: current_lines[count] = line current_lines_count += 1 if current_lines_count == current_chunk_length: break count += 1 if count % 100000 == 0: print count print "writing..." for l in current_chunk: o_f.write( current_lines[l] ) lines_saved = current_chunk_length + epoch * lines_in_memory epoch += 1 print "pass %s complete (%s lines saved)" % ( epoch, lines_saved )
bsd-2-clause