repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
anderscui/spellchecker | simple_checker/checker.py | 1 | 1176 | """Spelling Corrector.
Copyright 2007 Peter Norvig.
Open source code under MIT license: http://www.opensource.org/licenses/mit-license.php
"""
import re, collections
def words(text):
    """Extract every run of lowercase letters from *text* (case-folded)."""
    lowered = text.lower()
    return re.findall('[a-z]+', lowered)
def train(features):
    """Build a word-frequency model from *features*.

    Uses add-one smoothing: any word never seen still gets a count of 1,
    so unseen candidates are not assigned zero probability.
    """
    model = collections.defaultdict(lambda: 1)
    for feature in features:
        model[feature] += 1
    return model
# Language model built from the training corpus at import time.
# open() replaces the Python-2-only file() builtin, keeping the module
# importable on modern interpreters with identical behavior on Python 2.
NWORDS = train(words(open('big.txt').read()))
alphabet = 'abcdefghijklmnopqrstuvwxyz'
def edits1(word):
    """Return the set of all strings one edit away from *word*.

    An edit is a single deletion, adjacent transposition, replacement,
    or insertion of a letter from the alphabet.
    """
    splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    candidates = set()
    for left, right in splits:
        if right:
            candidates.add(left + right[1:])                        # deletion
        if len(right) > 1:
            candidates.add(left + right[1] + right[0] + right[2:])  # transposition
        for letter in alphabet:
            if right:
                candidates.add(left + letter + right[1:])           # replacement
            candidates.add(left + letter + right)                   # insertion
    return candidates
def known_edits2(word):
    """All dictionary words exactly two edits away from *word*.

    Only words present in NWORDS are kept, which prunes the otherwise
    enormous edit-distance-2 candidate space.
    """
    result = set()
    for e1 in edits1(word):
        for e2 in edits1(e1):
            if e2 in NWORDS:
                result.add(e2)
    return result
def known(words):
    """Return the subset of *words* present in the language model."""
    recognized = set()
    for w in words:
        if w in NWORDS:
            recognized.add(w)
    return recognized
def correct(word):
    """Return the most probable spelling correction for *word*.

    Candidate pools are tried in order of increasing edit distance; the
    first non-empty pool wins and its most frequent word is returned.
    If nothing is known, the word is returned unchanged.
    """
    for candidates in (known([word]), known(edits1(word)), known_edits2(word)):
        if candidates:
            return max(candidates, key=NWORDS.get)
    return word
| mit |
artainis/django-wiki | wiki/south_migrations/0001_initial.py | 14 | 22733 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South initial migration for the wiki app.

    Creates the core wiki tables (Article, ArticleRevision, URLPath, the
    plugin hierarchy) plus their unique constraints and one M2M table.
    """

    def forwards(self, orm):
        """Create all wiki tables, the M2M table and the unique constraints."""
        # Adding model 'Article'
        db.create_table(u'wiki_article', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('current_revision', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name=u'current_set', unique=True, null=True, to=orm['wiki.ArticleRevision'])),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('owner', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name=u'owned_articles', null=True, on_delete=models.SET_NULL, to=orm['auth.User'])),
            ('group', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.Group'], null=True, on_delete=models.SET_NULL, blank=True)),
            ('group_read', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('group_write', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('other_read', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('other_write', self.gf('django.db.models.fields.BooleanField')(default=True)),
        ))
        db.send_create_signal(u'wiki', ['Article'])

        # Adding model 'ArticleForObject'
        db.create_table(u'wiki_articleforobject', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('article', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['wiki.Article'])),
            ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'content_type_set_for_articleforobject', to=orm['contenttypes.ContentType'])),
            ('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
            ('is_mptt', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal(u'wiki', ['ArticleForObject'])

        # Adding unique constraint on 'ArticleForObject', fields ['content_type', 'object_id']
        db.create_unique(u'wiki_articleforobject', ['content_type_id', 'object_id'])

        # Adding model 'ArticleRevision'
        db.create_table(u'wiki_articlerevision', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('revision_number', self.gf('django.db.models.fields.IntegerField')()),
            ('user_message', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('automatic_log', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('ip_address', self.gf('django.db.models.fields.IPAddressField')(max_length=15, null=True, blank=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, on_delete=models.SET_NULL, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('previous_revision', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['wiki.ArticleRevision'], null=True, on_delete=models.SET_NULL, blank=True)),
            ('deleted', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('locked', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('article', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['wiki.Article'])),
            ('content', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=512)),
        ))
        db.send_create_signal(u'wiki', ['ArticleRevision'])

        # Adding unique constraint on 'ArticleRevision', fields ['article', 'revision_number']
        db.create_unique(u'wiki_articlerevision', ['article_id', 'revision_number'])

        # Adding model 'URLPath'
        db.create_table(u'wiki_urlpath', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('article', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['wiki.Article'])),
            ('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, null=True, blank=True)),
            ('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sites.Site'])),
            ('parent', self.gf('mptt.fields.TreeForeignKey')(blank=True, related_name=u'children', null=True, to=orm['wiki.URLPath'])),
            (u'lft', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
            (u'rght', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
            (u'tree_id', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
            (u'level', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
        ))
        db.send_create_signal(u'wiki', ['URLPath'])

        # Adding unique constraint on 'URLPath', fields ['site', 'parent', 'slug']
        db.create_unique(u'wiki_urlpath', ['site_id', 'parent_id', 'slug'])

        # Adding model 'ArticlePlugin'
        db.create_table(u'wiki_articleplugin', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('article', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['wiki.Article'])),
            ('deleted', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal(u'wiki', ['ArticlePlugin'])

        # Adding model 'ReusablePlugin'
        db.create_table(u'wiki_reusableplugin', (
            (u'articleplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['wiki.ArticlePlugin'], unique=True, primary_key=True)),
        ))
        db.send_create_signal(u'wiki', ['ReusablePlugin'])

        # Adding M2M table for field articles on 'ReusablePlugin'
        m2m_table_name = db.shorten_name(u'wiki_reusableplugin_articles')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('reusableplugin', models.ForeignKey(orm[u'wiki.reusableplugin'], null=False)),
            ('article', models.ForeignKey(orm[u'wiki.article'], null=False))
        ))
        db.create_unique(m2m_table_name, ['reusableplugin_id', 'article_id'])

        # Adding model 'SimplePlugin'
        db.create_table(u'wiki_simpleplugin', (
            (u'articleplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['wiki.ArticlePlugin'], unique=True, primary_key=True)),
            ('article_revision', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['wiki.ArticleRevision'])),
        ))
        db.send_create_signal(u'wiki', ['SimplePlugin'])

        # Adding model 'RevisionPlugin'
        db.create_table(u'wiki_revisionplugin', (
            (u'articleplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['wiki.ArticlePlugin'], unique=True, primary_key=True)),
            ('current_revision', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name=u'plugin_set', unique=True, null=True, to=orm['wiki.RevisionPluginRevision'])),
        ))
        db.send_create_signal(u'wiki', ['RevisionPlugin'])

        # Adding model 'RevisionPluginRevision'
        db.create_table(u'wiki_revisionpluginrevision', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('revision_number', self.gf('django.db.models.fields.IntegerField')()),
            ('user_message', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('automatic_log', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('ip_address', self.gf('django.db.models.fields.IPAddressField')(max_length=15, null=True, blank=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, on_delete=models.SET_NULL, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('previous_revision', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['wiki.RevisionPluginRevision'], null=True, on_delete=models.SET_NULL, blank=True)),
            ('deleted', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('locked', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('plugin', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'revision_set', to=orm['wiki.RevisionPlugin'])),
        ))
        db.send_create_signal(u'wiki', ['RevisionPluginRevision'])

    def backwards(self, orm):
        """Drop everything created by forwards(); constraints are removed
        first so the table drops do not violate them."""
        # Removing unique constraint on 'URLPath', fields ['site', 'parent', 'slug']
        db.delete_unique(u'wiki_urlpath', ['site_id', 'parent_id', 'slug'])

        # Removing unique constraint on 'ArticleRevision', fields ['article', 'revision_number']
        db.delete_unique(u'wiki_articlerevision', ['article_id', 'revision_number'])

        # Removing unique constraint on 'ArticleForObject', fields ['content_type', 'object_id']
        db.delete_unique(u'wiki_articleforobject', ['content_type_id', 'object_id'])

        # Deleting model 'Article'
        db.delete_table(u'wiki_article')

        # Deleting model 'ArticleForObject'
        db.delete_table(u'wiki_articleforobject')

        # Deleting model 'ArticleRevision'
        db.delete_table(u'wiki_articlerevision')

        # Deleting model 'URLPath'
        db.delete_table(u'wiki_urlpath')

        # Deleting model 'ArticlePlugin'
        db.delete_table(u'wiki_articleplugin')

        # Deleting model 'ReusablePlugin'
        db.delete_table(u'wiki_reusableplugin')

        # Removing M2M table for field articles on 'ReusablePlugin'
        db.delete_table(db.shorten_name(u'wiki_reusableplugin_articles'))

        # Deleting model 'SimplePlugin'
        db.delete_table(u'wiki_simpleplugin')

        # Deleting model 'RevisionPlugin'
        db.delete_table(u'wiki_revisionplugin')

        # Deleting model 'RevisionPluginRevision'
        db.delete_table(u'wiki_revisionpluginrevision')

    # Frozen ORM state used by South to reconstruct models at migration time;
    # auto-generated — do not edit by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'sites.site': {
            'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'wiki.article': {
            'Meta': {'object_name': 'Article'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'current_revision': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "u'current_set'", 'unique': 'True', 'null': 'True', 'to': u"orm['wiki.ArticleRevision']"}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'group_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'group_write': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'other_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'other_write': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'owned_articles'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"})
        },
        u'wiki.articleforobject': {
            'Meta': {'unique_together': "((u'content_type', u'object_id'),)", 'object_name': 'ArticleForObject'},
            'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['wiki.Article']"}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'content_type_set_for_articleforobject'", 'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_mptt': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        u'wiki.articleplugin': {
            'Meta': {'object_name': 'ArticlePlugin'},
            'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['wiki.Article']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'wiki.articlerevision': {
            'Meta': {'ordering': "(u'created',)", 'unique_together': "((u'article', u'revision_number'),)", 'object_name': 'ArticleRevision'},
            'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['wiki.Article']"}),
            'automatic_log': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
            'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'previous_revision': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['wiki.ArticleRevision']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'revision_number': ('django.db.models.fields.IntegerField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'user_message': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        },
        u'wiki.reusableplugin': {
            'Meta': {'object_name': 'ReusablePlugin', '_ormbases': [u'wiki.ArticlePlugin']},
            u'articleplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['wiki.ArticlePlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'articles': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'shared_plugins_set'", 'symmetrical': 'False', 'to': u"orm['wiki.Article']"})
        },
        u'wiki.revisionplugin': {
            'Meta': {'object_name': 'RevisionPlugin', '_ormbases': [u'wiki.ArticlePlugin']},
            u'articleplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['wiki.ArticlePlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'current_revision': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "u'plugin_set'", 'unique': 'True', 'null': 'True', 'to': u"orm['wiki.RevisionPluginRevision']"})
        },
        u'wiki.revisionpluginrevision': {
            'Meta': {'ordering': "(u'-created',)", 'object_name': 'RevisionPluginRevision'},
            'automatic_log': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
            'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'plugin': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'revision_set'", 'to': u"orm['wiki.RevisionPlugin']"}),
            'previous_revision': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['wiki.RevisionPluginRevision']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'revision_number': ('django.db.models.fields.IntegerField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'user_message': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        },
        u'wiki.simpleplugin': {
            'Meta': {'object_name': 'SimplePlugin', '_ormbases': [u'wiki.ArticlePlugin']},
            'article_revision': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['wiki.ArticleRevision']"}),
            u'articleplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['wiki.ArticlePlugin']", 'unique': 'True', 'primary_key': 'True'})
        },
        u'wiki.urlpath': {
            'Meta': {'unique_together': "((u'site', u'parent', u'slug'),)", 'object_name': 'URLPath'},
            'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['wiki.Article']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "u'children'", 'null': 'True', 'to': u"orm['wiki.URLPath']"}),
            u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        }
    }
complete_apps = ['wiki'] | gpl-3.0 |
daenamkim/ansible | lib/ansible/modules/network/nxos/nxos_overlay_global.py | 2 | 5914 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_overlay_global
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Configures anycast gateway MAC of the switch.
description:
- Configures anycast gateway MAC of the switch.
author: Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- Default restores params default value
- Supported MAC address format are "E.E.E", "EE-EE-EE-EE-EE-EE",
"EE:EE:EE:EE:EE:EE" and "EEEE.EEEE.EEEE"
options:
anycast_gateway_mac:
description:
- Anycast gateway mac of the switch.
required: true
default: null
'''
EXAMPLES = '''
- nxos_overlay_global:
anycast_gateway_mac: "b.b.b"
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["fabric forwarding anycast-gateway-mac 000B.000B.000B"]
'''
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import CustomNetworkConfig
# Maps each module argument name to the NX-OS CLI command prefix it configures.
PARAM_TO_COMMAND_KEYMAP = {
    'anycast_gateway_mac': 'fabric forwarding anycast-gateway-mac',
}
def get_existing(module, args):
    """Read the device's running config and return the current value for
    each argument in *args* (empty string when not configured)."""
    existing = {}
    config = str(get_config(module))
    for arg in args:
        command = PARAM_TO_COMMAND_KEYMAP[arg]
        matches = re.findall(r'(?:{0}\s)(?P<value>.*)$'.format(command), config, re.M)
        existing[arg] = matches[0] if matches else ''
    return existing
def apply_key_map(key_map, table):
    """Translate *table* keys through *key_map*, keeping only truthy values."""
    return {key_map.get(key): value for key, value in table.items() if value}
def get_commands(module, existing, proposed, candidate):
    """Diff the proposed state against the existing one and stage the
    resulting CLI commands on the *candidate* config object."""
    commands = []
    proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
    existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)

    for command, value in proposed_commands.items():
        current = existing_commands.get(command)
        if value == 'default':
            # 'default' means: remove whatever is currently configured.
            if current:
                commands.append('no {0} {1}'.format(command, current))
        elif 'anycast-gateway-mac' in command:
            # Compare both sides in canonical MAC form before deciding.
            normalized = normalize_mac(value, module)
            if normalized != normalize_mac(current, module):
                commands.append('{0} {1}'.format(command, normalized))

    if commands:
        candidate.add(commands, parents=[])
def normalize_mac(proposed_mac, module):
    """Normalize a MAC address to NX-OS dotted form ``EEEE.EEEE.EEEE`` (uppercase).

    Accepts "E.E.E", "EE-EE-EE-EE-EE-EE", "EE:EE:EE:EE:EE:EE" and
    "EEEE.EEEE.EEEE" style input; returns '' for None. Calls
    ``module.fail_json`` on a malformed address.
    """
    if proposed_mac is None:
        return ''
    try:
        if '-' in proposed_mac:
            splitted_mac = proposed_mac.split('-')
            if len(splitted_mac) != 6:
                raise ValueError
            for octect in splitted_mac:
                if len(octect) != 2:
                    raise ValueError
        elif '.' in proposed_mac:
            splitted_dot_mac = proposed_mac.split('.')
            if len(splitted_dot_mac) != 3:
                raise ValueError
            splitted_mac = []
            for octect in splitted_dot_mac:
                if len(octect) > 4:
                    raise ValueError
                # Always pad each dotted group to 4 hex digits. The previous
                # zfill(padding + 1) under-padded 2- and 3-digit groups (e.g.
                # "bb.bb.bb" became "0BB0.BB0B.B"), producing malformed MACs.
                splitted_mac.append(octect.zfill(4))
        elif ':' in proposed_mac:
            splitted_mac = proposed_mac.split(':')
            if len(splitted_mac) != 6:
                raise ValueError
            for octect in splitted_mac:
                if len(octect) != 2:
                    raise ValueError
        else:
            raise ValueError
    except ValueError:
        module.fail_json(msg='Invalid MAC address format', proposed_mac=proposed_mac)

    joined_mac = ''.join(splitted_mac)
    # Regroup the 12 hex digits into three dot-separated quads.
    mac = [joined_mac[i:i + 4] for i in range(0, len(joined_mac), 4)]
    return '.'.join(mac).upper()
def main():
    """Ansible module entry point.

    Builds the argument spec, diffs the proposed anycast gateway MAC against
    the device's running config, and pushes the resulting commands unless
    running in check mode.
    """
    argument_spec = dict(
        anycast_gateway_mac=dict(required=True, type='str'),
    )

    argument_spec.update(nxos_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    warnings = list()
    check_args(module, warnings)
    result = {'changed': False, 'commands': [], 'warnings': warnings}

    # Only keep params the keymap knows about and that were actually supplied.
    args = PARAM_TO_COMMAND_KEYMAP.keys()
    existing = get_existing(module, args)
    proposed = dict((k, v) for k, v in module.params.items()
                    if v is not None and k in args)

    candidate = CustomNetworkConfig(indent=3)
    get_commands(module, existing, proposed, candidate)

    if candidate:
        candidate = candidate.items_text()
        result['commands'] = candidate
        if not module.check_mode:
            # Apply the staged commands to the device (skipped in check mode).
            load_config(module, candidate)
        result['changed'] = True

    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
dakarsenegal/Plugin.Video.best | servers/vureel.py | 43 | 1587 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para vureel
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
# TODO: Este no tiene captcha, podría funcionar en free
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def test_video_exists( page_url ):
return True,""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve the direct media URL embedded in a vureel page.

    Returns a list of [label, url] pairs (a single entry for this host).
    """
    logger.info("[vureel.py] get_video_url(page_url='%s')" % page_url)
    data = scrapertools.cache_page(page_url)
    location = scrapertools.get_match(data, 'file\: "([^"]+)"')
    extension = scrapertools.get_filename_from_url(location)[-4:]
    return [[extension + " [vureel]", location]]
# Find this server's video links inside the given text.
def find_videos(data):
    """Scan *data* for vureel video links, returning [title, url, server]
    triples with duplicates removed (first occurrence wins)."""
    # e.g. http://www.vureel.com/video/49204
    patronvideos = '(vureel.com/video/\d+)'
    logger.info("[vureel.py] find_videos #" + patronvideos + "#")

    devuelve = []
    encontrados = set()
    for match in re.compile(patronvideos, re.DOTALL).findall(data):
        url = "http://www." + match
        if url in encontrados:
            logger.info(" url duplicada=" + url)
            continue
        logger.info(" url=" + url)
        devuelve.append(["[vureel]", url, 'vureel'])
        encontrados.add(url)

    return devuelve
| gpl-2.0 |
ShapeCompletion3D/binvox-rw-py | binvox_rw.py | 2 | 9243 | # Copyright (C) 2012 Daniel Maturana
# This file is part of binvox-rw-py.
#
# binvox-rw-py is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# binvox-rw-py is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with binvox-rw-py. If not, see <http://www.gnu.org/licenses/>.
#
"""
Binvox to Numpy and back.
>>> import numpy as np
>>> import binvox_rw
>>> with open('chair.binvox', 'rb') as f:
... m1 = binvox_rw.read_as_3d_array(f)
...
>>> m1.dims
[32, 32, 32]
>>> m1.scale
41.133000000000003
>>> m1.translate
[0.0, 0.0, 0.0]
>>> with open('chair_out.binvox', 'wb') as f:
... m1.write(f)
...
>>> with open('chair_out.binvox', 'rb') as f:
... m2 = binvox_rw.read_as_3d_array(f)
...
>>> m1.dims==m2.dims
True
>>> m1.scale==m2.scale
True
>>> m1.translate==m2.translate
True
>>> np.all(m1.data==m2.data)
True
>>> with open('chair.binvox', 'rb') as f:
... md = binvox_rw.read_as_3d_array(f)
...
>>> with open('chair.binvox', 'rb') as f:
... ms = binvox_rw.read_as_coord_array(f)
...
>>> data_ds = binvox_rw.dense_to_sparse(md.data)
>>> data_sd = binvox_rw.sparse_to_dense(ms.data, 32)
>>> np.all(data_sd==md.data)
True
>>> # the ordering of elements returned by numpy.nonzero changes with axis
>>> # ordering, so to compare for equality we first lexically sort the voxels.
>>> np.all(ms.data[:, np.lexsort(ms.data)] == data_ds[:, np.lexsort(data_ds)])
True
"""
import numpy as np
class Voxels(object):
    """ In-memory binvox model.

    `data` holds the voxels either as a 3-D boolean numpy array (dense
    representation) or as a 2-D float numpy array of coordinates (sparse
    representation).

    `dims`, `translate` and `scale` are the model metadata: `dims` is the
    voxel grid size, e.g. [32, 32, 32], and scale/translate map a voxel
    index (i, j, k) back into original model coordinates:

        x = scale * ((i + .5) / dims[0]) + translate[0]
        y = scale * ((j + .5) / dims[1]) + translate[1]
        z = scale * ((k + .5) / dims[2]) + translate[2]

    `axis_order` records whether the data axes are ordered 'xyz' or the
    native binvox 'xzy'.
    """

    def __init__(self, data, dims, translate, scale, axis_order):
        self.data = data
        self.dims = dims
        self.translate = translate
        self.scale = scale
        assert (axis_order in ('xzy', 'xyz'))
        self.axis_order = axis_order

    def clone(self):
        """Return a copy whose data array and metadata lists are independent."""
        return Voxels(self.data.copy(), self.dims[:], self.translate[:],
                      self.scale, self.axis_order)

    def write(self, fp):
        """Serialize this model to *fp* using the module-level write()."""
        write(self, fp)
def read_header(fp):
    """ Read binvox header. Mostly meant for internal use.

    *fp* must be opened in binary mode. Returns (dims, translate, scale).
    Raises IOError if the magic '#binvox' line is missing.
    """
    line = fp.readline().strip()
    # Compare against bytes: a binary file yields bytes on Python 3, and on
    # Python 2 a b'...' literal is an ordinary str, so this works on both.
    if not line.startswith(b'#binvox'):
        raise IOError('Not a binvox file')
    # Wrap map() in list(): on Python 3 a bare map() is a one-shot iterator,
    # which would break the later dims[i] indexing done by the readers.
    dims = list(map(int, fp.readline().strip().split(b' ')[1:]))
    translate = list(map(float, fp.readline().strip().split(b' ')[1:]))
    scale = list(map(float, fp.readline().strip().split(b' ')[1:]))[0]
    line = fp.readline()  # consume the 'data' line
    return dims, translate, scale
def read_as_3d_array(fp, fix_coords=True):
    """ Read binary binvox format as array.

    Returns the model with accompanying metadata.

    Voxels are stored in a three-dimensional numpy array, which is simple and
    direct, but may use a lot of memory for large models. (Storage requirements
    are 8*(d^3) bytes, where d is the dimensions of the binvox model. Numpy
    boolean arrays use a byte per element).

    Doesn't do any checks on input except for the '#binvox' line.
    """
    dims, translate, scale = read_header(fp)
    raw_data = np.frombuffer(fp.read(), dtype=np.uint8)
    # The payload is run-length encoded as (value, count) byte pairs.
    # If just using reshape() on the decoded data, indexing the array as
    # array[i, j, k] maps the indices onto coords as i->x, j->z, k->y.
    # With fix_coords the data is transposed so that i->x, j->y, k->z.
    values, counts = raw_data[::2], raw_data[1::2]
    # Use the builtin bool: the np.bool alias was removed in NumPy 1.24.
    data = np.repeat(values, counts).astype(bool)
    data = data.reshape(dims)
    if fix_coords:
        # xzy to xyz TODO the right thing
        data = np.transpose(data, (0, 2, 1))
        axis_order = 'xyz'
    else:
        axis_order = 'xzy'
    return Voxels(data, dims, translate, scale, axis_order)
def read_as_coord_array(fp, fix_coords=True):
    """ Read binary binvox format as coordinates.

    Returns binvox model with voxels in a "coordinate" representation, i.e. an
    3 x N array where N is the number of nonzero voxels. Each column
    corresponds to a nonzero voxel and the 3 rows are the (x, z, y) coordinates
    of the voxel. (The odd ordering is due to the way binvox format lays out
    data). Note that coordinates refer to the binvox voxels, without any
    scaling or translation.

    Use this to save memory if your model is very sparse (mostly empty).

    Doesn't do any checks on input except for the '#binvox' line.
    """
    dims, translate, scale = read_header(fp)
    # materialize dims: read_header may hand back a lazy map iterator (py3)
    dims = list(dims)
    raw_data = np.frombuffer(fp.read(), dtype=np.uint8)
    # payload is run-length encoded as (value, count) byte pairs
    values, counts = raw_data[::2], raw_data[1::2]
    # linear start/end index of each run in the flattened grid
    end_indices = np.cumsum(counts)
    indices = np.concatenate(([0], end_indices[:-1])).astype(end_indices.dtype)
    # keep only the runs whose value is nonzero (occupied voxels);
    # builtin bool: the np.bool alias was removed in numpy >= 1.24
    values = values.astype(bool)
    indices = indices[values]
    end_indices = end_indices[values]
    nz_voxels = []
    for index, end_index in zip(indices, end_indices):
        nz_voxels.extend(range(index, end_index))
    nz_voxels = np.array(nz_voxels)
    # TODO are these dims correct?
    # according to docs,
    # index = x * wxh + z * width + y; // wxh = width * height = d * d
    # floor division (//) keeps the coordinates integral on Python 3,
    # where / would produce floats and corrupt the indices
    x = nz_voxels // (dims[0]*dims[1])
    zwpy = nz_voxels % (dims[0]*dims[1])  # z*w + y
    z = zwpy // dims[0]
    y = zwpy % dims[0]
    if fix_coords:
        data = np.vstack((x, y, z))
        axis_order = 'xyz'
    else:
        data = np.vstack((x, z, y))
        axis_order = 'xzy'
    return Voxels(np.ascontiguousarray(data), dims, translate, scale, axis_order)
def dense_to_sparse(voxel_data, dtype=int):
    """ From dense representation to sparse (coordinate) representation.
    No coordinate reordering.

    Returns a 3 x N array holding the (i, j, k) indices of the nonzero
    voxels, cast to *dtype*. (The builtin int replaces np.int, which was
    only an alias and was removed in numpy >= 1.24.)

    Raises:
        ValueError: if voxel_data is not a 3-D array.
    """
    if voxel_data.ndim != 3:
        raise ValueError('voxel_data is wrong shape; should be 3D array.')
    return np.asarray(np.nonzero(voxel_data), dtype)
def sparse_to_dense(voxel_data, dims, dtype=bool):
    """ From sparse (3 x N coordinate) representation to a dense grid.

    Args:
        voxel_data: 3 x N integer-coercible array of voxel coordinates.
        dims: scalar (cube side) or length-3 sequence of grid dimensions.
        dtype: dtype of the returned dense array. (The builtin bool
            replaces np.bool, removed in numpy >= 1.24.)

    Returns:
        Dense array of shape *dims* with True at each listed coordinate.
        Coordinates outside [0, dims) are silently discarded.

    Raises:
        ValueError: if voxel_data is not a 3 x N array.
    """
    if voxel_data.ndim != 2 or voxel_data.shape[0] != 3:
        raise ValueError('voxel_data is wrong shape; should be 3xN array.')
    if np.isscalar(dims):
        dims = [dims]*3
    dims = np.atleast_2d(dims).T
    # truncate to integers
    xyz = voxel_data.astype(int)
    # discard voxels that fall outside dims
    valid_ix = ~np.any((xyz < 0) | (xyz >= dims), 0)
    xyz = xyz[:, valid_ix]
    out = np.zeros(dims.flatten(), dtype=dtype)
    out[tuple(xyz)] = True
    return out
#def get_linear_index(x, y, z, dims):
#""" Assuming xzy order. (y increasing fastest.
#TODO ensure this is right when dims are not all same
#"""
#return x*(dims[1]*dims[2]) + z*dims[1] + y
def write(voxel_model, fp):
    """ Write binary binvox format.

    Note that when saving a model in sparse (coordinate) format, it is first
    converted to dense format.

    Doesn't check if the model is 'sane'.

    NOTE(review): the payload is written with chr() and string headers,
    which assumes a Python-2-style byte string stream -- on Python 3 a
    binary file would need bytes writes; confirm the expected fp mode.
    """
    if voxel_model.data.ndim==2:
        # TODO avoid conversion to dense
        dense_voxel_data = sparse_to_dense(voxel_model.data, voxel_model.dims)
    else:
        dense_voxel_data = voxel_model.data
    fp.write('#binvox 1\n')
    fp.write('dim '+' '.join(map(str, voxel_model.dims))+'\n')
    fp.write('translate '+' '.join(map(str, voxel_model.translate))+'\n')
    fp.write('scale '+str(voxel_model.scale)+'\n')
    fp.write('data\n')
    if not voxel_model.axis_order in ('xzy', 'xyz'):
        raise ValueError('Unsupported voxel model axis order')
    # binvox stores data in xzy order; transpose back if held as xyz
    if voxel_model.axis_order=='xzy':
        voxels_flat = dense_voxel_data.flatten()
    elif voxel_model.axis_order=='xyz':
        voxels_flat = np.transpose(dense_voxel_data, (0, 2, 1)).flatten()
    # keep a sort of state machine for writing run length encoding:
    # emit (value, count) byte pairs, counts capped at 255
    state = voxels_flat[0]
    ctr = 0
    for c in voxels_flat:
        if c==state:
            ctr += 1
            # if ctr hits max, dump
            if ctr==255:
                fp.write(chr(state))
                fp.write(chr(ctr))
                ctr = 0
        else:
            # if switch state, dump
            # NOTE(review): if a run ends exactly at a 255 dump, this
            # emits a harmless (value, 0) pair -- readers expand it to
            # nothing, but it wastes two bytes.
            fp.write(chr(state))
            fp.write(chr(ctr))
            state = c
            ctr = 1
    # flush out remainders
    if ctr > 0:
        fp.write(chr(state))
        fp.write(chr(ctr))
if __name__ == '__main__':
    # Run the module's embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
| gpl-3.0 |
awatts/boto | boto/cloudhsm/layer1.py | 135 | 16187 | # Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.cloudhsm import exceptions
class CloudHSMConnection(AWSQueryConnection):
"""
AWS CloudHSM Service
"""
APIVersion = "2014-05-30"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "cloudhsm.us-east-1.amazonaws.com"
ServiceName = "CloudHSM"
TargetPrefix = "CloudHsmFrontendService"
ResponseError = JSONResponseError
_faults = {
"InvalidRequestException": exceptions.InvalidRequestException,
"CloudHsmServiceException": exceptions.CloudHsmServiceException,
"CloudHsmInternalException": exceptions.CloudHsmInternalException,
}
def __init__(self, **kwargs):
    """Build a CloudHSM connection.

    Accepts an optional ``region`` keyword (a RegionInfo); when absent,
    the default us-east-1 region is used. The region's endpoint becomes
    the host unless the caller supplied one explicitly.
    """
    region = kwargs.pop('region', None)
    if not region:
        region = RegionInfo(self, self.DefaultRegionName,
                            self.DefaultRegionEndpoint)
    if kwargs.get('host') is None:
        kwargs['host'] = region.endpoint
    super(CloudHSMConnection, self).__init__(**kwargs)
    self.region = region
def _required_auth_capability(self):
    # CloudHSM requests must be signed with AWS Signature Version 4.
    return ['hmac-v4']
def create_hapg(self, label):
    """
    Creates a high-availability partition group -- a group of
    partitions spanning multiple physical HSMs.

    :type label: string
    :param label: The label of the new high-availability partition group.
    """
    return self.make_request(action='CreateHapg',
                             body=json.dumps({'Label': label}))
def create_hsm(self, subnet_id, ssh_key, iam_role_arn, subscription_type,
               eni_ip=None, external_id=None, client_token=None,
               syslog_ip=None):
    """
    Creates an uninitialized HSM instance; provisioning the appliance
    will incur charges on the AWS account.

    :type subnet_id: string
    :param subnet_id: Identifier of the VPC subnet for the HSM.

    :type ssh_key: string
    :param ssh_key: SSH public key to install on the HSM.

    :type iam_role_arn: string
    :param iam_role_arn: ARN of an IAM role allowing AWS CloudHSM to
        allocate an ENI on your behalf.

    :type subscription_type: string
    :param subscription_type: The subscription type.

    :type eni_ip: string
    :param eni_ip: Optional IP address to assign to the HSM's ENI.

    :type external_id: string
    :param external_id: Optional external ID from **IamRoleArn**.

    :type client_token: string
    :param client_token: Optional idempotency token; repeated calls with
        the same token are ignored.

    :type syslog_ip: string
    :param syslog_ip: Optional IP address of the syslog monitoring server.
    """
    params = {
        'SubnetId': subnet_id,
        'SshKey': ssh_key,
        'IamRoleArn': iam_role_arn,
        'SubscriptionType': subscription_type,
    }
    optional = {
        'EniIp': eni_ip,
        'ExternalId': external_id,
        'ClientToken': client_token,
        'SyslogIp': syslog_ip,
    }
    # only send the optional keys the caller actually provided
    params.update((key, val) for key, val in optional.items()
                  if val is not None)
    return self.make_request(action='CreateHsm',
                             body=json.dumps(params))
def create_luna_client(self, certificate, label=None):
    """
    Creates an HSM client.

    :type certificate: string
    :param certificate: Contents of a Base64-encoded X.509 v3
        certificate to install on the HSMs used by this client.

    :type label: string
    :param label: Optional label for the client.
    """
    params = {'Certificate': certificate}
    if label is not None:
        params['Label'] = label
    return self.make_request(action='CreateLunaClient',
                             body=json.dumps(params))
def delete_hapg(self, hapg_arn):
    """
    Deletes a high-availability partition group.

    :type hapg_arn: string
    :param hapg_arn: ARN of the partition group to delete.
    """
    return self.make_request(action='DeleteHapg',
                             body=json.dumps({'HapgArn': hapg_arn}))
def delete_hsm(self, hsm_arn):
    """
    Deletes an HSM. Irreversible: key material on the appliance cannot
    be recovered afterwards.

    :type hsm_arn: string
    :param hsm_arn: ARN of the HSM to delete.
    """
    return self.make_request(action='DeleteHsm',
                             body=json.dumps({'HsmArn': hsm_arn}))
def delete_luna_client(self, client_arn):
    """
    Deletes a client.

    :type client_arn: string
    :param client_arn: ARN of the client to delete.
    """
    return self.make_request(action='DeleteLunaClient',
                             body=json.dumps({'ClientArn': client_arn}))
def describe_hapg(self, hapg_arn):
    """
    Retrieves information about a high-availability partition group.

    :type hapg_arn: string
    :param hapg_arn: ARN of the partition group to describe.
    """
    return self.make_request(action='DescribeHapg',
                             body=json.dumps({'HapgArn': hapg_arn}))
def describe_hsm(self, hsm_arn=None, hsm_serial_number=None):
    """
    Retrieves information about an HSM, identified either by ARN or by
    serial number (exactly one of the two must be supplied).

    :type hsm_arn: string
    :param hsm_arn: ARN of the HSM.

    :type hsm_serial_number: string
    :param hsm_serial_number: Serial number of the HSM.
    """
    candidates = {
        'HsmArn': hsm_arn,
        'HsmSerialNumber': hsm_serial_number,
    }
    params = {key: val for key, val in candidates.items()
              if val is not None}
    return self.make_request(action='DescribeHsm',
                             body=json.dumps(params))
def describe_luna_client(self, client_arn=None,
                         certificate_fingerprint=None):
    """
    Retrieves information about an HSM client.

    :type client_arn: string
    :param client_arn: ARN of the client.

    :type certificate_fingerprint: string
    :param certificate_fingerprint: The certificate fingerprint.
    """
    candidates = {
        'ClientArn': client_arn,
        'CertificateFingerprint': certificate_fingerprint,
    }
    params = {key: val for key, val in candidates.items()
              if val is not None}
    return self.make_request(action='DescribeLunaClient',
                             body=json.dumps(params))
def get_config(self, client_arn, client_version, hapg_list):
    """
    Gets the configuration files needed to connect to every
    high-availability partition group the client belongs to.

    :type client_arn: string
    :param client_arn: ARN of the client.

    :type client_version: string
    :param client_version: The client version.

    :type hapg_list: list
    :param hapg_list: ARNs of the partition groups associated with the
        client.
    """
    body = json.dumps({
        'ClientArn': client_arn,
        'ClientVersion': client_version,
        'HapgList': hapg_list,
    })
    return self.make_request(action='GetConfig', body=body)
def list_available_zones(self):
    """
    Lists the Availability Zones with available AWS CloudHSM capacity.
    """
    return self.make_request(action='ListAvailableZones',
                             body=json.dumps({}))
def list_hapgs(self, next_token=None):
    """
    Lists the account's high-availability partition groups. Paginated:
    when more results exist, the response's NextToken is passed back in
    on the next call.

    :type next_token: string
    :param next_token: NextToken from a previous ListHapgs call, or
        None for the first call.
    """
    params = {} if next_token is None else {'NextToken': next_token}
    return self.make_request(action='ListHapgs',
                             body=json.dumps(params))
def list_hsms(self, next_token=None):
    """
    Retrieves identifiers of all HSMs provisioned for the current
    customer. Paginated: when more results exist, the response's
    NextToken is passed back in on the next call.

    :type next_token: string
    :param next_token: NextToken from a previous ListHsms call, or None
        for the first call.
    """
    params = {} if next_token is None else {'NextToken': next_token}
    return self.make_request(action='ListHsms',
                             body=json.dumps(params))
def list_luna_clients(self, next_token=None):
    """
    Lists all clients. Paginated: when more results exist, the
    response's NextToken is passed back in on the next call.

    :type next_token: string
    :param next_token: NextToken from a previous ListLunaClients call,
        or None for the first call.
    """
    params = {} if next_token is None else {'NextToken': next_token}
    return self.make_request(action='ListLunaClients',
                             body=json.dumps(params))
def modify_hapg(self, hapg_arn, label=None, partition_serial_list=None):
    """
    Modifies an existing high-availability partition group.

    :type hapg_arn: string
    :param hapg_arn: ARN of the partition group to modify.

    :type label: string
    :param label: Optional new label for the group.

    :type partition_serial_list: list
    :param partition_serial_list: Optional list of partition serial
        numbers to make members of the group.
    """
    params = {'HapgArn': hapg_arn}
    optional = {
        'Label': label,
        'PartitionSerialList': partition_serial_list,
    }
    params.update((key, val) for key, val in optional.items()
                  if val is not None)
    return self.make_request(action='ModifyHapg',
                             body=json.dumps(params))
def modify_hsm(self, hsm_arn, subnet_id=None, eni_ip=None,
               iam_role_arn=None, external_id=None, syslog_ip=None):
    """
    Modifies an HSM. All parameters other than the ARN are optional and
    only sent when supplied.

    :type hsm_arn: string
    :param hsm_arn: ARN of the HSM to modify.

    :type subnet_id: string
    :param subnet_id: New identifier of the subnet the HSM is in.

    :type eni_ip: string
    :param eni_ip: New IP address for the HSM's elastic network
        interface.

    :type iam_role_arn: string
    :param iam_role_arn: The new IAM role ARN.

    :type external_id: string
    :param external_id: The new external ID.

    :type syslog_ip: string
    :param syslog_ip: New IP address for the syslog monitoring server.
    """
    params = {'HsmArn': hsm_arn}
    optional = {
        'SubnetId': subnet_id,
        'EniIp': eni_ip,
        'IamRoleArn': iam_role_arn,
        'ExternalId': external_id,
        'SyslogIp': syslog_ip,
    }
    params.update((key, val) for key, val in optional.items()
                  if val is not None)
    return self.make_request(action='ModifyHsm',
                             body=json.dumps(params))
def modify_luna_client(self, client_arn, certificate):
    """
    Modifies the certificate used by the client; may start a workflow
    to install the new certificate on the client's HSMs.

    :type client_arn: string
    :param client_arn: ARN of the client.

    :type certificate: string
    :param certificate: The new certificate for the client.
    """
    body = json.dumps({
        'ClientArn': client_arn,
        'Certificate': certificate,
    })
    return self.make_request(action='ModifyLunaClient', body=body)
def make_request(self, action, body):
    """POST a JSON request to the CloudHSM endpoint and decode the reply.

    Args:
        action (str): API operation name; combined with TargetPrefix
            into the X-Amz-Target header.
        body (str): JSON-encoded request parameters.

    Returns:
        dict or None: decoded JSON on HTTP 200 (None when the 200
        response body is empty -- callers must tolerate that).

    Raises:
        A JSONResponseError subclass mapped from the fault's '__type'
        field via self._faults, or self.ResponseError for unknown
        faults.
    """
    headers = {
        'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
        'Host': self.region.endpoint,
        'Content-Type': 'application/x-amz-json-1.1',
        'Content-Length': str(len(body)),
    }
    http_request = self.build_base_http_request(
        method='POST', path='/', auth_path='/', params={},
        headers=headers, data=body)
    # retry aggressively; CloudHSM control-plane calls are idempotent here
    response = self._mexe(http_request, sender=None,
                          override_num_retries=10)
    response_body = response.read().decode('utf-8')
    boto.log.debug(response_body)
    if response.status == 200:
        if response_body:
            return json.loads(response_body)
        # empty 200 body falls through and implicitly returns None
    else:
        # map the service fault type to a registered exception class
        json_body = json.loads(response_body)
        fault_name = json_body.get('__type', None)
        exception_class = self._faults.get(fault_name, self.ResponseError)
        raise exception_class(response.status, response.reason,
                              body=json_body)
| mit |
doublebits/osf.io | scripts/tests/test_email_registration_contributors.py | 60 | 2231 | import mock
from nose.tools import * # noqa
from tests.base import OsfTestCase
from tests.factories import RegistrationFactory, UserFactory
from website import models
from scripts.email_registration_contributors import (
get_registration_contributors, send_retraction_and_embargo_addition_message,
main, MAILER, MESSAGE_NAME
)
class TestSendRetractionAndEmbargoAdditionMessage(OsfTestCase):
    """Tests for the one-off registration-contributor email script.

    Fix: the two ``main`` tests had their True/False names swapped
    relative to the ``dry_run`` value they actually pass; the names now
    match the behavior they assert.
    """

    def setUp(self):
        super(TestSendRetractionAndEmbargoAdditionMessage, self).setUp()
        self.registration_contrib = UserFactory()
        self.other_user = UserFactory()
        self.registration = RegistrationFactory(creator=self.registration_contrib)

    def tearDown(self):
        super(TestSendRetractionAndEmbargoAdditionMessage, self).tearDown()
        models.Node.remove()
        models.User.remove()

    def test_get_registration_contributors(self):
        # Two users exist, but only the registration creator counts.
        assert_equal(models.User.find().count(), 2)
        registration_contributors = get_registration_contributors()
        assert_equal(len(registration_contributors), 1)

    # NOTE(review): the next two tests patch the target function but then
    # invoke the directly-imported (unpatched) reference, so the mock is
    # inert and a real send may be attempted via MAILER -- verify intent.
    @mock.patch('scripts.email_registration_contributors.send_retraction_and_embargo_addition_message')
    def test_send_retraction_and_embargo_addition_message(self, mock_send_mail):
        user = UserFactory()
        send_retraction_and_embargo_addition_message(user, MESSAGE_NAME, MAILER, dry_run=False)
        user.reload()
        assert_in(MESSAGE_NAME, user.security_messages)

    @mock.patch('scripts.email_registration_contributors.send_retraction_and_embargo_addition_message')
    def test_dry_run_does_not_save_to_user(self, mock_send_mail):
        user = UserFactory()
        send_retraction_and_embargo_addition_message(user, MESSAGE_NAME, MAILER, dry_run=True)
        user.reload()
        assert_not_in(MESSAGE_NAME, user.security_messages)

    def test_main_dry_run_false_does_save(self):
        # dry_run=False records the message, emptying the pending list.
        assert_equal(len(get_registration_contributors()), 1)
        main(dry_run=False)
        assert_equal(len(get_registration_contributors()), 0)

    def test_main_dry_run_true_does_not_save(self):
        # dry_run=True must leave the contributor list untouched.
        assert_equal(len(get_registration_contributors()), 1)
        main(dry_run=True)
        assert_equal(len(get_registration_contributors()), 1)
| apache-2.0 |
textpotential/aproximatebible | edit_posts.py | 1 | 1435 | import os
import re
dir_ = 'docs/_posts'
post_filenames = os.listdir(dir_)
for filename in post_filenames:
if filename.endswith('.md'):
fullpath = os.path.join(dir_, filename)
with open(fullpath, mode='r', encoding='utf-8') as f:
contents = f.read()
# truncate filename in list after 3rd dash and before dot
# search for that truncated filename anywhere in file and replace with
# full filename minus .md
# rewrite contents to file
# for filename in post_filenames:
# post_url = os.path.splitext(filename)[0]
# post_trunc = post_url.split('-', 3)[3]
# old_ref = 'post_url ' + post_trunc.lower()
# new_ref = 'post_url ' + post_url
# contents = contents.replace(old_ref, new_ref)
# contents = contents.replace('%}', '%}{% endraw %}')
# contents = contents.replace('{% post_url', '{% raw %}{% post_url')
contents = contents.replace('(/assets', '({{site.baseurl}}/assets')
# print(contents)
# contents = contents.replace('{% raw %}', '')
# contents = contents.replace('{% endraw %}', '')
# print(contents)
with open(fullpath, mode='w', encoding='utf-8') as g:
g.write(contents)
# if new_soup:
# f.seek(0)
# f.write(str(new_soup))
# f.truncate()
| gpl-2.0 |
idem2lyon/persomov | couchpotato/core/media/_base/providers/torrent/sceneaccess.py | 44 | 5755 | import traceback
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
log = CPLog(__name__)
class Base(TorrentProvider):
    """SceneAccess torrent provider: search, result parsing and login.

    Fix: the result-parsing handler used a bare ``except:``, which also
    swallowed SystemExit/KeyboardInterrupt; narrowed to ``Exception``.
    """

    urls = {
        'test': 'https://www.sceneaccess.eu/',
        'login': 'https://www.sceneaccess.eu/login',
        'login_check': 'https://www.sceneaccess.eu/inbox',
        'detail': 'https://www.sceneaccess.eu/details?id=%s',
        'search': 'https://www.sceneaccess.eu/browse?c%d=%d',
        'archive': 'https://www.sceneaccess.eu/archive?&c%d=%d',
        'download': 'https://www.sceneaccess.eu/%s',
    }

    http_time_between_calls = 1  # Seconds

    def _searchOnTitle(self, title, media, quality, results):
        """Search the site for *title* and append parsed hits to *results*."""
        url = self.buildUrl(title, media, quality)
        data = self.getHTMLData(url)
        if data:
            html = BeautifulSoup(data)
            try:
                resultsTable = html.find('table', attrs = {'id': 'torrents-table'})
                if resultsTable is None:
                    return
                entries = resultsTable.find_all('tr', attrs = {'class': 'tt_row'})
                for result in entries:
                    link = result.find('td', attrs = {'class': 'ttr_name'}).find('a')
                    url = result.find('td', attrs = {'class': 'td_dl'}).find('a')
                    seeders = result.find('td', attrs = {'class': 'ttr_seeders'}).find('a')
                    leechers = result.find('td', attrs = {'class': 'ttr_leechers'}).find('a')
                    torrent_id = link['href'].replace('details?id=', '')

                    results.append({
                        'id': torrent_id,
                        'name': link['title'],
                        'url': self.urls['download'] % url['href'],
                        'detail_url': self.urls['detail'] % torrent_id,
                        'size': self.parseSize(result.find('td', attrs = {'class': 'ttr_size'}).contents[0]),
                        'seeders': tryInt(seeders.string) if seeders else 0,
                        'leechers': tryInt(leechers.string) if leechers else 0,
                        'get_more_info': self.getMoreInfo,
                    })
            except Exception:
                # narrowed from a bare except: parse failures are logged
                # with the full traceback, but control-flow exceptions
                # (KeyboardInterrupt/SystemExit) now propagate
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))

    def getMoreInfo(self, item):
        """Fetch and attach the torrent's long description to *item*."""
        full_description = self.getCache('sceneaccess.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)
        html = BeautifulSoup(full_description)
        nfo_pre = html.find('div', attrs = {'id': 'details_table'})
        description = toUnicode(nfo_pre.text) if nfo_pre else ''
        item['description'] = description
        return item

    # Login

    def getLoginParams(self):
        """Form fields posted to the login URL."""
        return {
            'username': self.conf('username'),
            'password': self.conf('password'),
            'submit': 'come on in',
        }

    def loginSuccess(self, output):
        # a logged-in session can see the inbox link
        return '/inbox' in output.lower()

    loginCheckSuccess = loginSuccess
config = [{
'name': 'sceneaccess',
'groups': [
{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'SceneAccess',
'description': '<a href="https://sceneaccess.eu/">SceneAccess</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAACT0lEQVR4AYVQS0sbURidO3OTmajJ5FElTTOkPmZ01GhHrIq0aoWAj1Vc+A/cuRMXbl24V9SlCGqrLhVFCrooEhCp2BAx0mobTY2kaR7qmOm87EXL1EWxh29xL+c7nPMdgGHYO5bF/gdbefnr6WlbWRnxluMwAB4Z0uEgXa7nwaDL7+/RNPzxbYvb/XJ0FBYVfd/ayh0fQ4qCGEHcm0KLRZUk7Pb2YRJPRwcsKMidnKD3t9VVT3s7BDh+z5FOZ3Vfn3h+Hltfx00mRRSRWFcUmmVNhYVqPn8dj3va2oh+txvcQRVF9ebm1fi4k+dRFbosY5rm4Hk7xxULQnJnx93S4g0EIEEQRoDLo6PrWEw8Pc0eHLwYGopMTDirqlJ7eyhYYGHhfgfHCcKYksZGVB/NcXI2mw6HhZERqrjYTNPHi4tFPh8aJIYIhgPlcCRDoZLW1s75+Z/7+59nZ/OJhLWigqAoKZX6Mjf3dXkZ3pydGYLc4aEoCCkInzQ1fRobS2xuvllaonkedfArnY5OTdGVldBkOADgqq2Nr6z8CIWaJietDHOhKB+HhwFKC6Gnq4ukKJvP9zcSbjYDXbeVlkKzuZBhnnV3e3t6UOmaJO0ODibW1hB1GYkg8R/gup7Z3TVZLJ5AILW9LcZiVpYtYBhw16O3t7cauckyeF9Tgz0ATpL2+nopmWycmbnY2LiKRjFk6/d7+/vRJfl4HGzV1T0UIM43MGBvaIBWK/YvwM5w+IMgGH8tkyEgvIpE7M3Nt6qqZrNyOq1kMmouh455Ggz+BhKY4GEc2CfwAAAAAElFTkSuQmCC',
'options': [
{
'name': 'enabled',
'type': 'enabler',
'default': False,
},
{
'name': 'username',
'default': '',
},
{
'name': 'password',
'default': '',
'type': 'password',
},
{
'name': 'seed_ratio',
'label': 'Seed ratio',
'type': 'float',
'default': 1,
'description': 'Will not be (re)moved until this seed ratio is met.',
},
{
'name': 'seed_time',
'label': 'Seed time',
'type': 'int',
'default': 40,
'description': 'Will not be (re)moved until this seed time (in hours) is met.',
},
{
'name': 'extra_score',
'advanced': True,
'label': 'Extra Score',
'type': 'int',
'default': 20,
'description': 'Starting score for each release found via this provider.',
}
],
},
],
}]
| gpl-3.0 |
spacether/pycalculix | pycalculix/partmodule.py | 1 | 40690 | """This module stores the Part class. It is used to make 2D parts.
"""
import numpy as np # needed for linspace on hole creation
from . import base_classes
from . import geometry #point, line, area
class Part(base_classes.Idobj):
"""This makes a part.
Args:
parent: parent FeaModel
Attributes:
__fea (FeaModel): parent FeaModel
points (list): list or part points, excludes arc centers
allpoints (list): list or part points, includes arc centers
lines (list): list of all Line and Arc that make the part
signlines (list): list of all SignLine and SignArc that make the part
__cursor (Point): location of a cursor drawing the part
__holemode (bool): if True, lines will be added to holes, otherwise,
they'll be added to areas
areas (list): list of Area that make up the part
left (list): list the parts leftmost lines, they must be vertical
right (list): list the parts rightmost lines, they must be vertical
top (list): list the parts top lines, they must be horizontal
bottom (list): list the parts bottom lines, they must be horizontal
center (Point): the area centroid of the part
nodes (list): list of part's nodes
elements (list): list of part's elements
"""
def __init__(self, feamodel):
    # parent FeaModel that owns and registers all geometry for this part
    self.fea = feamodel
    # drawing cursor starts at the origin
    self.__cursor = geometry.Point(0, 0)
    self.areas = [] # top area is the buffer
    # make the buffer
    # NOTE(review): fea.areas is presumably an item list whose append()
    # returns the stored item -- confirm against base_classes
    area = self.fea.areas.append(geometry.Area(self, []))
    self.areas.append(area)
    base_classes.Idobj.__init__(self)
    # side line lists; populated later by __set_side()
    self.left = None
    self.right = None
    self.top = None
    self.bottom = None
    # area centroid, set once areas are closed
    self.center = None
    self.nodes = []
    self.elements = []
    # False: new lines extend areas; True: new lines cut holes
    self.__holemode = False
    self.fea.parts.append(self)
def __hash__(self):
    """Returns the item's id as its hash."""
    return self.id
@property
def lines(self):
    """Returns list of part lines."""
    collected = set()
    for area in self.areas:
        collected |= set(area.lines)
    return list(collected)
@property
def signlines(self):
    """Returns list of part signline and signarc."""
    collected = set()
    for area in self.areas:
        collected |= set(area.signlines)
    return list(collected)
@property
def points(self):
    """Returns list of part points, excludes arc centers."""
    unique_pts = {point for line in self.lines for point in line.points}
    return list(unique_pts)
@property
def allpoints(self):
    """Returns list of part points, includes arc centers."""
    unique_pts = {point for line in self.lines for point in line.allpoints}
    return list(unique_pts)
def get_item(self, item):
    """"Returns the part's item(s) requested by the passed string.

    Args:
        item (str): string requesting item(s)

            * Valid examples: 'P0', 'L0', 'left', 'A0'

    Returns:
        item(s) or None: If items are found they are returned

            * If there is only one item it is returned
            * If there are multiple items, they are returned as a list
            * If no items are found None is returned

    Fix: previously a valid prefix with an unknown id raised IndexError
    instead of returning None as documented.
    """
    if item in ('left', 'right', 'top', 'bottom'):
        return getattr(self, item)
    if item and item[0] in ('P', 'L', 'A'):
        pools = {'P': self.points, 'L': self.signlines, 'A': self.areas}
        num = int(item[1:])
        # next(...) yields None instead of raising when no id matches,
        # honoring the documented contract
        return next((entry for entry in pools[item[0]] if entry.id == num),
                    None)
    print('Unknown item! Please pass the name of a point, line or area!')
    return None
def get_name(self):
    """Returns the part name based on id number."""
    return 'PART{}'.format(self.id)
def __set_side(self, side):
    """Sets the part.side to a list of lines on that side of the part.

    Used to set the part.left, part.right, part.top, part.bottom sides.

    Args:
        side (string): 'left', 'right', 'top','bottom'
    """
    # set index and axis, ind=0 is low side, ind=-1 is high side
    inds = {'left':0, 'right':-1, 'top':-1, 'bottom':0}
    axes = {'left':'y', 'right':'y', 'top':'x', 'bottom':'x'}
    ind = inds[side]
    axis = axes[side]
    # loc = 'left', ind = 0, axis = 'y'
    # NOTE(review): for left/right the extreme is looked up on axis 'y'
    # but distances below compare the same axis -- lines on a side must
    # be straight and axis-aligned for this to select them
    points = self.points
    # sort the points low to high
    points = sorted(points, key=lambda pt: getattr(pt, axis))
    # store the target value
    target_value = getattr(points[ind], axis)
    res = []
    lines = self.signlines
    for sline in lines:
        # arcs are skipped: only straight SignLine can lie on a side
        if isinstance(sline, geometry.SignLine):
            pt_axis_vals = [getattr(pt, axis) for pt in sline.points]
            pt_dist_vals = [abs(target_value - pt_axis_val) for pt_axis_val in pt_axis_vals]
            # both endpoints must sit on the extreme within tolerance ACC
            if all([pt_dist_val < geometry.ACC for pt_dist_val in pt_dist_vals]):
                # line is on the left side
                res.append(sline)
    setattr(self, side, res)
def goto(self, x, y, holemode=False):
    """Moves the part cursor to a location.

    If that location has a point at it, use it.
    If not, make a new point at that location.

    Args:
        x (float): x-coordinate of the point to go to
        y (float): y-coordinate of the point to go to
        holemode (bool): if True, we start drawing a hole here, otherwise
            we start drawing an area

    Returns:
        self.__cursor (Point): returns the updated cursor point
    """
    [pnew, already_exists] = self.__make_get_pt(x, y)
    if already_exists:
        if self.areas[-1].closed == True:
            # make a new area if the old area is already closed and we're
            # going to an existing point
            area = self.fea.areas.append(geometry.Area(self, []))
            self.areas.append(area)
    # return cursor
    self.__cursor = pnew
    self.__holemode = holemode
    return self.__cursor
def __get_point(self, point):
    """Returns the part point matching *point* within tolerance, else None."""
    for candidate in self.allpoints:
        # points closer than the geometry accuracy are the same point
        if (point - candidate).length() < geometry.ACC:
            return candidate
    return None
def __make_get_pt(self, x, y):
    """Gets a point if it exists, makes it if it doesn't. Returns the point.

    Use this when you need a point made in the part, and you want to
    use an extant point if one is available.

    Args:
        x (float): point x-coordinate
        y (float): point y-coordinate

    Returns:
        list:
            list[0]: Point
            list[1]: boolean, True = the point already existed
    """
    thept = geometry.Point(x, y)
    pfound = self.__get_point(thept)
    pexists = True
    if pfound == None:
        # no existing point within tolerance: register the new one
        pfound = thept
        self.fea.register(pfound)
        pexists = False
    return [pfound, pexists]
def __calc_area_center(self):
    """Calculates and returns the part area and centroid Point.

    Returns:
        list: [area, Point]
    """
    # (area, centroid) pairs for every closed area in the part
    pairs = [(area.area, area.center) for area in self.areas
             if area.closed == True]
    total_area = sum(aval for aval, _ in pairs)
    # area-weighted centroid
    cx_val = sum(cen.x * aval for aval, cen in pairs) / total_area
    cy_val = sum(cen.y * aval for aval, cen in pairs) / total_area
    return [total_area, geometry.Point(cx_val, cy_val)]
def __make_get_sline(self, lnew):
    """Returns a signed line or arc, makes it if it needs to.

    Args:
        lnew (Line or Arc or SignLine or SignArc): Line or Arc to make

    Returns:
        list:
            list[0]: SignLine or SignArc
            list[1]: boolean, True = the line already existed
    """
    lpos = lnew.signed_copy(1)
    lneg = lnew.signed_copy(-1)
    # get part's signed lines
    slines = self.signlines
    lexists = False
    for sline in slines:
        if lpos == sline:
            # same direction already exists: reuse it
            lexists = True
            signline_new = sline
            break
        elif lneg == sline:
            # opposite direction exists: register a reversed copy; the
            # shared line is now interior, so it is no longer an edge
            lexists = True
            signline_new = sline.signed_copy(-1)
            signline_new.edge = False
            self.fea.register(signline_new)
            break
    else:
        # fired when we haven't broken out of the loop, the line is new
        if isinstance(lnew, geometry.SignLine):
            # strip the sign: store the underlying unsigned Line
            lnew = geometry.Line(lnew.pt(0), lnew.pt(1))
        self.fea.register(lnew)
        lnew.save_to_points()
        signline_new = lnew.signed_copy(1)
        self.fea.register(signline_new)
        signline_new.line.add_signline(signline_new)
    return [signline_new, lexists]
def draw_circle(self, center_x, center_y, radius, num_arcs=4):
"""Draws a circle area and adds it to the part.
Args:
center_x (float): x-axis hole center
center_y (float): y-axis hole center
radius (float): hole radius
num_arcs (int): number of arcs to use, must be >= 3
Returns:
loop (geometry.LineLoop): a LineLoop list of SignArc
"""
center = geometry.Point(center_x, center_y)
rvect = geometry.Point(0, radius)
start = center + rvect
self.goto(start.x, start.y)
angles = np.linspace(360/num_arcs,360,num_arcs, endpoint=True)
for ang in angles:
point = geometry.Point(0, radius).rot_ccw_deg(ang)
point = point + center
self.draw_arc(point.x, point.y, center.x, center.y)
loop = self.areas[-1].exlines
self.__update()
return loop
def draw_hole(self, center_x, center_y, radius, num_arcs=4, filled=False):
"""Makes a hole in the part.
Args:
center_x (float): x-axis hole center
center_y (float): y-axis hole center
radius (float): hole radius
num_arcs (int): number of arcs to use, must be >= 3
filled (bool): whether to fill the hole
* True: makes a new area in the part
Returns:
hole_lines (list or None): list of hole SignLine or SignArc
* Returns None if hole was not made.
"""
center = geometry.Point(center_x, center_y)
area = self.__area_from_pt(center)
if area == None:
print("You can't make a hole here until there's an area here!")
return None
else:
# make points
rvect = geometry.Point(0, radius)
start = center + rvect
self.goto(start.x, start.y, holemode=True)
angles = np.linspace(360/num_arcs,360,num_arcs, endpoint=True)
for ang in angles:
point = geometry.Point(0, radius).rot_ccw_deg(ang)
point = point + center
self.draw_arc(point.x, point.y, center.x, center.y)
# make new area
if filled:
# reverse order, reverse sline directions, store in feamodel
slines = list(area.holes[-1])
slines.reverse()
slines = [sline.signed_copy(-1) for sline in slines]
slines = [self.__make_get_sline(sline)[0] for sline in slines]
anew = self.fea.areas.append(geometry.Area(self, slines))
self.areas.append(anew)
self.__update()
return area.holes[-1]
def draw_arc_angle(self, degrees_ccw, center_x, center_y):
"""Makes an arc and adds it to the part.
| Current point is the first arc point.
| degrees_ccw is the swept angle in degrees, counterclockwise
| (center_x, center_y) is the arc center
| Degrees: Traversed angle of arc must be < 180 degrees
Args:
degrees_ccw (float): arc swept angle in degrees, counterclockwise
center_x (float): arc center point x-coordinate
center_y (float): arc center point y-coordinate
Returns:
list: [arc, arc_start_point, arc_end_point]
"""
center = geometry.Point(center_x, center_y)
radius_vector = self.__cursor - center
radius_vector.rot_ccw_deg(degrees_ccw)
end = center + radius_vector
return self.draw_arc(end.x, end.y, center_x, center_y)
    def draw_arc(self, end_x, end_y, center_x, center_y):
        """Makes an arc and adds it to the part.

        | Current point is the first arc point.
        | (end_x, end_y) is the end point
        | (center_x, center_y) is the arc center

        | Degrees: Traversed angle of arc must be < 180 degrees
        | Radians: Traversed angle of arc must be < Pi

        Args:
            end_x (float): arc end point x-coordinate
            end_y (float): arc end point y-coordinate
            center_x (float): arc center point x-coordinate
            center_y (float): arc center point y-coordinate

        Returns:
            list: [arc, arc_start_point, arc_end_point]
        """
        pold = self.__cursor
        # make arc center point
        ctr = self.__make_get_pt(center_x, center_y)[0]
        # make arc end point; this also advances the drawing cursor
        self.__cursor = self.__make_get_pt(end_x, end_y)[0]
        # make arc
        arc = self.__make_get_sline(geometry.Arc(pold, self.__cursor, ctr))[0]
        if self.__holemode:
            # in hole mode the arc is added to the hole loop of the area
            # under the cursor instead of to the last-drawn area
            area = self.__area_from_pt(self.__cursor)
            if area != None:
                closed = area.add_hole_sline(arc)
                if closed:
                    # the hole loop closed itself: leave hole mode
                    self.__holemode = False
            else:
                print('You must have a closed area here before making a hole!')
        else:
            self.areas[-1].add_sline(arc)
        return [arc, pold, self.__cursor]
def draw_line_delta(self, delta_x, delta_y):
"""Draws a line a relative distance, and adds it to the part.
Args:
delta_x (float): x-axis delta distance to draw the line
delta_y (float): y-axis delta distance to draw the line
Returns:
list: [line, point_start, point_end]
"""
x = self.__cursor.x + delta_x
y = self.__cursor.y + delta_y
return self.draw_line_to(x, y)
def draw_line_rad(self, dx_rad):
"""Draws a line a relative radial distance, and adds it to the part.
Args:
dx_rad (float): x-axis delta distance to draw the line
Returns:
list: [line, point_start, point_end]
"""
return self.draw_line_delta(dx_rad, 0.0)
def draw_line_ax(self, dy_ax):
"""Draws a line a relative axial distance, and adds it to the part.
Args:
dy_ax (float): y-axis delta distance to draw the line
Returns:
list: [line, point_start, point_end]
"""
return self.draw_line_delta(0.0, dy_ax)
    def draw_line_to(self, x, y):
        """Draws a line to the given location, and adds it to the part.

        Args:
            x (float): x-axis coordinate of the end point
            y (float): y-axis coordinate of the end point

        Returns:
            list: [SignLine, point_start, point_end]
        """
        pold = self.__cursor
        # making the end point advances the drawing cursor
        self.__cursor = self.__make_get_pt(x, y)[0]
        sline = self.__make_get_sline(geometry.Line(pold, self.__cursor))[0]
        if self.__holemode:
            # hole mode: the line goes into the hole loop of the area under
            # the cursor rather than the last-drawn area
            area = self.__area_from_pt(self.__cursor)
            if area != None:
                closed = area.add_hole_sline(sline)
                if closed:
                    # hole loop closed itself: leave hole mode and refresh
                    self.__holemode = False
                    self.__update()
            else:
                print('You must have a closed area here before making a hole!')
        else:
            # drawing in last area
            self.areas[-1].add_sline(sline)
            # check for closure of the area
            if self.areas[-1].closed:
                self.__update()
        return [sline, pold, self.__cursor]
def __get_maxlength(self):
"""Returns the max distance between points in the part."""
points = self.points
maxlen = 0.0
# loop through points checking dist to next point
for ind, point_1 in enumerate(points[:-1]):
for point_2 in points[ind:]:
vect = point_1 - point_2
dist = vect.length()
if dist > maxlen:
maxlen = dist
return maxlen
def __area_from_pt(self, point):
"""Returns the area that the point is inside.
Args:
point (Point): the point we are asking about
Returns:
Area or None:
Area is the found area
None is returned if the point is not in one of this part's areas
"""
for area in self.areas:
if area.contains_point(point):
return area
return None
    def fillet_lines(self, line1, line2, radius):
        """Fillets the given lines in the part.

        This inserts an arc in the part tangent to the two given lines.

        Args:
            line1 (SignLine): line that the arc starts on, arc is tangent
            line2 (SignLine): line that the arc ends on, arc is tangent
            radius (float): arc radius size

        Returns:
            list: [arc, start_point, end_point]
                Returns None (implicitly) if the lines do not touch or do
                not run in a consistent direction.
        """
        # check if the lines are touching
        if not line1.line.touches(line2.line):
            print('ERROR: Cannot fillet! Lines must touch!')
            return
        # order the pair so first_line ends where second_line starts
        if line1.line.pt(1) == line2.line.pt(0):
            first_line = line1
            second_line = line2
        elif line2.line.pt(1) == line1.line.pt(0):
            first_line = line2
            second_line = line1
        else:
            print('ERROR: Sign lines must both be going in CW or CCW '
                  'direction. The two passed lines are going in '
                  'different directions. Unable to fillet them.')
            return
        # remember the cursor; the point-making below moves it
        tmp = self.__cursor
        # offset the lines, assuming area is being traced clockwise
        # get the intersection point
        magnitude = radius
        l1_off = first_line.offset(magnitude)
        l2_off = second_line.offset(magnitude)
        ctrpt = l1_off.intersects(l2_off)
        if ctrpt == None:
            # flip the offset direction if lines don't intersect
            magnitude = -radius
            l1_off = first_line.offset(magnitude)
            l2_off = second_line.offset(magnitude)
            ctrpt = l1_off.intersects(l2_off)
        # now we have an intersecting point
        p1_new = first_line.arc_tang_intersection(ctrpt, magnitude)
        p2_new = second_line.arc_tang_intersection(ctrpt, magnitude)
        rempt = first_line.pt(1)
        p1_new = self.__make_get_pt(p1_new.x, p1_new.y)[0]
        ctrpt = self.__make_get_pt(ctrpt.x, ctrpt.y)[0]
        p2_new = self.__make_get_pt(p2_new.x, p2_new.y)[0]
        # make the new arc
        arc = self.__make_get_sline(geometry.Arc(p1_new, p2_new, ctrpt))[0]
        # put the arc in the right location in the area
        area = first_line.lineloop.parent
        area.line_insert(first_line, arc)
        print('Arc inserted into area %i' % (area.id))
        # edit the adjacent lines to replace the removed pt
        first_line.set_pt(1, arc.pt(0))
        second_line.set_pt(0, arc.pt(1))
        # del old pt, store new points for the arc
        self.fea.points.remove(rempt)
        # reset the cursor to where it should be
        self.__cursor = tmp
        return [arc, arc.pt(0), arc.pt(1)]
def fillet_all(self, radius):
"""Fillets all external lines not within 10 degrees of tangency
Args:
radius (float): the fillet radius to use
Returns:
arcs (list): list of SignArc
"""
pairs = []
for area in self.areas:
for ind, sline in enumerate(area.exlines):
prev_sline = area.exlines[ind-1]
this_point = sline.pt(0)
if len(this_point.lines) == 2:
# only fillet lines that are not shared by other areas
if (isinstance(sline, geometry.SignLine)
and isinstance(prev_sline, geometry.SignLine)):
# only fillet lines
perp1 = prev_sline.get_perp_vec(this_point)
perp2 = sline.get_perp_vec(this_point)
ang = perp1.ang_bet_deg(perp2)
is_tangent = (-10 <= ang <= 10)
if is_tangent == False:
pairs.append([prev_sline, sline])
arcs = []
for pair in pairs:
arc = self.fillet_lines(pair[0], pair[1], radius)[0]
arcs.append(arc)
return arcs
    def label(self, axis):
        """Labels the part on a Matplotlib axis

        Args:
            axis (Matplotlib Axis): Matplotlib Axis
        """
        # NOTE(review): center.y and center.x appear deliberately swapped —
        # the plotting in this module seems to draw parts with swapped
        # (y, x) axes; confirm against the other plot code before changing.
        axis.text(self.center.y, self.center.x, self.get_name(),
                  ha='center', va='center')
def plot(self, axis, label=True, color='yellow'):
"""Plots the part on the passed Matplotlib axis.
Args:
axis (Matplotlib axis): the axis we will plot on
label (bool): True displays the part label
color (tuple): r,g,b,a matplotlib color tuple
"""
patches = []
for area in self.areas:
if area.closed:
patches.append(area.get_patch())
for patch in patches:
patch.set_color(color)
axis.add_patch(patch)
# apply the label
if label:
self.label(axis)
    def __cut_line(self, point, line):
        """Cuts the passed line at the passed point.

        The passed line is cut into two lines. All areas that included the
        original line are updated.

        Args:
            line (Line or Arc): the line to cut, must be Line or Arc
            point (Point): the location on the line that we will cut it

        Returns:
            list: [pnew, lnew]
                pnew: the new point we created to cut the original line
                lnew: the new line we created, the end half of the orignal line
        """
        pnew = self.__make_get_pt(point.x, point.y)[0]
        if point.id != -1:
            # if passed point already exists, use it
            pnew = point
        pend = line.pt(1)
        line.set_pt(1, pnew) # shortens the line
        # the second half keeps the original end point (and arc center)
        new_prim = geometry.Line(pnew, pend)
        if isinstance(line, geometry.Arc):
            new_prim = geometry.Arc(pnew, pend, line.actr)
        new_sline = self.__make_get_sline(new_prim)[0]
        # insert the new line into existing areas
        # NOTE(review): is_line/is_sline only feed the debug print below
        is_line = isinstance(line, geometry.Line) or isinstance(line, geometry.Arc)
        is_sline = isinstance(line, geometry.SignLine) or isinstance(line, geometry.SignArc)
        print('Cutting line (is_line, is_sline, signlines) (%s, %s, %i)' % (is_line, is_sline, len(line.signlines)))
        slines = line.signlines
        for sline in slines:
            area = sline.lineloop.parent
            if sline.sign == 1:
                # cutting line in clockwise area, where line is pos
                area.line_insert(sline, new_sline)
            elif sline.sign == -1:
                # cutting line in clockwise area, where line is neg
                rev_sline = self.__make_get_sline(new_sline.signed_copy(-1))[0]
                area.line_insert(sline, rev_sline, after=False)
        return [pnew, new_sline]
    def __cut_area(self, area, start_pt, end_pt):
        """Cuts the part area from start_pt to end_pt.

        The existing area keeps the [beg + cut + end] loop; a new area is
        made from the [mid + reversed cut] loop and registered.
        """
        # select the line portions that define the areas
        # list[:low] excludes low index
        # list [high:] includes high index
        # we want the line which start with the point
        lpre_start = area.line_from_startpt(start_pt)
        lpre_end = area.line_from_startpt(end_pt)
        if lpre_start == None or lpre_end == None:
            # debug aid only; the index() calls below will then raise
            self.fea.plot_geometry()
        print(area.exlines)
        istart = area.exlines.index(lpre_start)
        iend = area.exlines.index(lpre_end)
        low = min(istart, iend)
        high = max(istart, iend)
        # lists of lines for areas
        beg = area.exlines[:low]
        mid = area.exlines[low:high]
        end = area.exlines[high:]
        # make cut line for [beg + cut + end] area
        start_pt = mid[0].pt(0)
        end_pt = mid[-1].pt(1)
        fwd = geometry.Line(start_pt, end_pt)
        rev = geometry.Line(end_pt, start_pt)
        # update existing area
        cline = self.__make_get_sline(fwd)[0]
        alist_curr = beg + [cline] + end
        area.update(alist_curr)
        # make new area
        cline_rev = self.__make_get_sline(rev)[0]
        alist_other = mid + [cline_rev]
        anew = geometry.Area(self, alist_other)
        self.fea.register(anew)
        self.areas.append(anew)
        # fix holes
        self.__store_holes()
    def __merge_hole(self, area, start_pt, end_pt):
        """Merges the hole into its area with a line between passed points."""
        # line will be drawn from start point on exlines to end point on hole
        hole_points = area.holepoints
        if start_pt in hole_points:
            # swap so start_pt is on the exterior and end_pt on the hole
            tmp = start_pt
            start_pt = end_pt
            end_pt = tmp
        lpre_start = area.line_from_startpt(start_pt)
        hole_line = area.line_from_startpt(end_pt)
        if lpre_start == None or hole_line == None:
            # debug aid only; the index() call below will then raise
            self.fea.plot_geometry()
        ind = area.exlines.index(lpre_start)
        # store sections of the area
        beg = area.exlines[:ind]
        end = area.exlines[ind:]
        thehole = None
        mid = []
        for hole in area.holes:
            for sline in hole:
                if sline == hole_line:
                    # rotate the hole loop so it starts at hole_line
                    ind = hole.index(sline)
                    mid = hole[ind:] + hole[:ind]
                    thehole = hole
                    break
            if mid != []:
                break
        # bridge lines: forward onto the hole, reversed back to the exterior
        fwd = geometry.Line(start_pt, end_pt)
        fwd_sline = self.__make_get_sline(fwd)[0]
        rev_sline = fwd_sline.signed_copy(-1)
        self.fea.register(rev_sline)
        rev_sline.line.add_signline(rev_sline)
        alist_curr = beg + [fwd_sline] + mid + [rev_sline] + end
        area.holes.remove(thehole)
        area.update(alist_curr)
def __get_cut_line(self, cutline):
"""Returns a cut line beginning and ending on the part."""
# find all intersections
lines = self.lines
points = set()
# add line intersections
for line in lines:
newpt = line.intersects(cutline)
if newpt != None:
points.add(newpt)
# loop through intersection points, storing distance
points = list(points)
for (ind, point) in enumerate(points):
dist = point - cutline.pt(0)
dist = dist.length()
pdict = {'dist': dist, 'point': point}
points[ind] = pdict
# sort the points by dist, lowest to highest, return first cut
points = sorted(points, key=lambda k: k['dist'])
start = points[0]['point']
end = points[1]['point']
new_cut = geometry.Line(start, end)
return new_cut
    def __cut_with_line(self, cutline, debug):
        """Cuts the part using the passed line.

        Walks the cutline's intersections with the part in distance order,
        cutting each crossed line and then cutting (or hole-merging) the
        area between each consecutive pair of intersection points.

        Args:
            cutline (Line): line to cut the area with
            debug (list): bool for printing, bool for plotting after every cut
        """
        # find all intersections
        lines = self.lines
        points = set()
        # add line intersections
        for line in lines:
            if debug[0]:
                print('Checking X between %s and cutline' % line.get_name())
            newpt = line.intersects(cutline)
            if debug[0]:
                print(' Intersection: %s' % newpt)
            if newpt != None:
                points.add(newpt)
        # loop through intersection points, storing distance and lines to cut
        points = list(points)
        for (ind, point) in enumerate(points):
            dist = point - cutline.pt(0)
            dist = dist.length()
            pdict = {'dist': dist}
            realpt = self.__get_point(point)
            # we only want to store lines to cut here
            if realpt == None or realpt.arc_center == True:
                # we could have an existing arc center on a line that needs to
                # be cut
                if realpt == None:
                    realpt = point
                for line in lines:
                    point_on_line = line.coincident(realpt)
                    if point_on_line and point not in line.points:
                        pdict['line'] = line
                        break
            pdict['point'] = realpt
            points[ind] = pdict
        # sort the points by dist, lowest to highest
        points = sorted(points, key=lambda k: k['dist'])
        if debug[0]:
            print('==================================')
            print('Points on the cutline!------------')
            for pdict in points:
                print(pdict['point'])
                print(' dist %.3f' % pdict['dist'])
                if 'line' in pdict:
                    print(' X cut line: %s' % pdict['line'])
            print('==================================')
        # loop through the points cutting areas
        for ind in range(len(points)):
            pdict = points[ind]
            start_pt = pdict['point']
            if 'line' in pdict:
                # cut the line and point to the real new point
                print('Cut through line %s' % pdict['line'].get_name())
                pnew = self.__cut_line(start_pt, pdict['line'])[0]
                points[ind]['point'] = pnew
                start_pt = pnew
            end_pt = None
            pavg = None
            area = None
            if ind > 0:
                # find the area we're working on via the segment midpoint
                end_pt = points[ind-1]['point']
                pavg = start_pt + end_pt
                pavg = pavg*0.5
                area = self.__area_from_pt(pavg)
                if area == None:
                    # stop cutting if we are trying to cut through a holes
                    print('No area found at point avg, no cut made')
                    break
                start_hole = start_pt in area.holepoints
                end_hole = end_pt in area.holepoints
                if start_hole and end_hole and area != None:
                    print('Trying to join holes, no cut made')
                    break
                # stop cutting if we are trying to join holes
                if end_hole == True or start_hole == True:
                    print('Merging hole in %s' % area.get_name())
                    self.__merge_hole(area, start_pt, end_pt)
                else:
                    print('Cutting %s' % area.get_name())
                    self.__cut_area(area, start_pt, end_pt)
                if debug[1]:
                    self.fea.plot_geometry()
    def __store_holes(self):
        """Puts all holes in their correct areas

        After an area is cut, a hole may end up geometrically inside a
        different area than the one it is attached to; this re-homes it.
        """
        holes = []
        for area in self.areas:
            holes += area.holes
        for hole in holes:
            hole_area = hole.parent
            for area in self.areas:
                is_inside = hole.inside(area.exlines)
                if is_inside == True:
                    if area != hole_area:
                        # delete the hole from the old area, move it to the new
                        hole.set_parent(area)
                        hole_area.holes.remove(hole)
                        hole_area.close()
                        area.holes.append(hole)
                        area.close()
                        afrom, ato = hole_area.get_name(), area.get_name()
                        print('Hole moved from %s to %s' % (afrom, ato))
def __vect_to_line(self, point, cvect):
"""Returns a cutting line at a given point and cutting vector.
Args:
point (Point): the location we are cutting from
cvect (Point): the vector direction of the cut from pt
Returns:
cutline (Line): cut line
"""
cvect.make_unit()
vsize = self.__get_maxlength()
endpt = point + cvect*vsize
cutline = geometry.Line(point, endpt)
cutline = self.__get_cut_line(cutline)
return cutline
    def __chunk_area(self, area, mode, exclude_convex, debug):
        """Cuts the passed area into regular smaller areas.

        The cgx mesher only accepts areas which are 3-5 sides
        so one may need to call this before using that mesher.
        Cuts are made perpendicular to tangent points or at
        internal corners.
        At internal corners two perpendicular cuts are made.

        Args:
            area (Area): the area to cut into smaller areas
            mode (str): 'both', 'holes' or 'ext' chunks the area using the
                points form this set. See part.chunk
            exclude_convex (bool): If true exclude cutting convex tangent points
            debug (list): bool for printing, bool for plotting after every cut
        """
        # store the cuts first, then cut after
        cuts = [] # each item is a dict with a pt and vect in it
        loops = []
        cut_point_sets = []
        if mode == 'holes':
            loops = area.holes
        elif mode == 'ext':
            loops = [area.exlines]
        elif mode == 'both':
            loops = area.holes + [area.exlines]
        for loop in loops:
            for ind, line in enumerate(loop):
                # examine the corner where line_pre ends and line_post begins
                line_pre = loop[ind-1]
                line_post = line
                point = line_pre.pt(1)
                perp1 = line_pre.get_perp_vec(point)
                perp2 = line_post.get_perp_vec(point)
                #tan1 = line_pre.get_tan_vec(point)
                #tan2 = line_post.get_tan_vec(point)
                # flip these vectors later to make them cut the area(s)
                ang = perp1.ang_bet_deg(perp2)
                cut = {}
                make_cut = True
                pre_arc = isinstance(line_pre, geometry.SignArc)
                post_arc = isinstance(line_post, geometry.SignArc)
                if pre_arc or post_arc:
                    # optionally skip corners on convex arcs
                    if pre_arc and post_arc:
                        if (line_pre.concavity == 'convex'
                                and line_post.concavity == 'convex'
                                and exclude_convex == True):
                            make_cut = False
                    else:
                        # only one is an arc
                        arc = line_pre
                        if post_arc:
                            arc = line_post
                        if arc.concavity == 'convex' and exclude_convex == True:
                            make_cut = False
                is_tangent = (-10 <= ang <= 10)
                is_int_corner = (45 <= ang <= 135)
                """
                print('-------------------')
                print('%s' % point)
                print('Angle is %.3f' % ang)
                print('Make cut %s' % make_cut)
                print('is_tangent %s' % is_tangent)
                print('is_int_corner %s' % is_int_corner)
                """
                if is_tangent:
                    if make_cut == True:
                        # tangent
                        cut = {'pt':point, 'vect':perp1*-1}
                        cut_line = self.__vect_to_line(cut['pt'], cut['vect'])
                        pset = set(cut_line.points)
                        # skip cuts whose end points duplicate a stored cut
                        if pset not in cut_point_sets:
                            cut_point_sets.append(pset)
                            cut['line'] = cut_line
                            cuts.append(cut)
                elif is_int_corner:
                    # internal corner: two perpendicular cuts
                    cut = {'pt':point, 'vect':perp1*-1}
                    cut_line = self.__vect_to_line(cut['pt'], cut['vect'])
                    pset = set(cut_line.points)
                    if pset not in cut_point_sets:
                        cut_point_sets.append(pset)
                        cut['line'] = cut_line
                        cuts.append(cut)
                    cut = {'pt':point, 'vect':perp2*-1}
                    cut_line = self.__vect_to_line(cut['pt'], cut['vect'])
                    pset = set(cut_line.points)
                    if pset not in cut_point_sets:
                        cut_point_sets.append(pset)
                        cut['line'] = cut_line
                        cuts.append(cut)
                elif ang < 0:
                    # external corner
                    # do not split these
                    pass
        # do the cuts
        for cut in cuts:
            print('--------------------')
            print('Cut point:', cut['pt'].get_name())
            print('Cut line:', cut['line'])
            self.__cut_with_line(cut['line'], debug)
    def chunk(self, mode='both', exclude_convex = True, debug=[0, 0]):
        """Chunks all areas in the part.

        Args:
            mode (str): area chunking mode
                - 'both': cuts areas using holes and exterior points
                - 'holes': cut areas using holes points only
                - 'ext': cut areas using exterior points only
            exclude_convex (bool): If true exclude cutting convex tangent points
            debug (list): bool for printing, bool for plotting after every cut
        """
        # NOTE(review): debug=[0, 0] is a mutable default argument; it looks
        # read-only downstream so it is harmless, but confirm before reuse.
        for area in self.areas:
            if area.closed:
                min_sides = 5
                has_holes = len(area.holes) > 0
                ext_gr = len(area.exlines) > min_sides
                both_false = (has_holes == False and ext_gr == False)
                if mode == 'holes' and has_holes:
                    self.__chunk_area(area, mode, exclude_convex, debug)
                elif (mode == 'both'
                      and (has_holes or ext_gr or not exclude_convex)):
                    self.__chunk_area(area, mode, exclude_convex, debug)
                elif mode == 'ext' and (ext_gr or not exclude_convex):
                    self.__chunk_area(area, mode, exclude_convex, debug)
                else:
                    # explain why this area was skipped
                    aname = area.get_name()
                    val = 'Area %s was not chunked because it had' % aname
                    adder = ''
                    if mode == 'both' and both_false:
                        adder = '<= %i lines and no holes.' % min_sides
                    elif has_holes == False and (mode in ['both', 'holes']):
                        adder = 'no holes.'
                    elif ext_gr == False and (mode in ['both', 'ext']):
                        adder = '<= %i lines.' % min_sides
                    print('%s %s' % (val, adder))
        # store the left, right, top, and bottom lines
        self.__update()
def __update(self):
"""Updates the left, right, top, bottom sides and area and center"""
self.__set_side('left')
self.__set_side('right')
self.__set_side('top')
self.__set_side('bottom')
self.area, self.center = self.__calc_area_center()
def __str__(self):
"""Returns string listing object type, id number and name."""
val = 'Part, id=%i name=%s' % (self.id, self.get_name())
return val
| apache-2.0 |
davehunt/kuma | kuma/actioncounters/tests/tests.py | 9 | 8421 | from django.core.exceptions import MultipleObjectsReturned
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.contrib.contenttypes.models import ContentType
from django.http import HttpRequest
from django.test import TransactionTestCase
from nose.tools import eq_, ok_
from nose.plugins.attrib import attr
from kuma.core.utils import get_unique
from ..models import ActionCounterUnique
from .models import TestModel
class ActionCountersTest(TransactionTestCase):
    """Exercises the action counter fields on TestModel (likes, views,
    frobs, boogs): uniqueness handling, per-unique limits, and totals."""

    def setUp(self):
        super(ActionCountersTest, self).setUp()
        User = get_user_model()
        # NOTE(review): user1 is created with tester2's email address; this
        # looks like a fixture typo, but no test below depends on email.
        self.user1 = User.objects.create_user(
            'tester1', 'tester2@tester.com', 'tester1')
        self.user2 = User.objects.create_user(
            'tester2', 'tester2@tester.com', 'tester2')

        self.obj_1 = TestModel(title="alpha")
        self.obj_1.save()

    def mk_request(self, user=None, ip='192.168.123.123', user_agent='FakeBrowser 1.0'):
        """Returns a fake GET request carrying the given unique-visitor
        details (user, remote IP, and user agent)."""
        request = HttpRequest()
        request.user = user and user or AnonymousUser()
        request.method = 'GET'
        request.META['REMOTE_ADDR'] = ip
        request.META['HTTP_USER_AGENT'] = user_agent
        return request

    @attr('bug694544')
    def test_bug694544(self):
        """Bug 694544: unicode character in request details should not break"""
        try:
            action_name = "likes"
            obj_1 = self.obj_1
            obj_1_ct = ContentType.objects.get_for_model(obj_1)

            request = self.mk_request(user_agent=u"Some\xef\xbf\xbdbrowser")
            user, ip, user_agent, unique_hash = get_unique(obj_1_ct,
                                                           obj_1.pk,
                                                           name=action_name,
                                                           request=request)
        except UnicodeDecodeError:
            ok_(False, "UnicodeDecodeError should not be thrown")

    @attr('bad_multiple')
    def test_bad_multiple_counters(self):
        """
        Force multiple counters, possibly result of race condition,
        ensure graceful handling
        """
        action_name = "likes"
        obj_1 = self.obj_1
        obj_1_ct = ContentType.objects.get_for_model(obj_1)

        request = self.mk_request()
        user, ip, user_agent, unique_hash = get_unique(obj_1_ct, obj_1.pk,
                                                       name=action_name,
                                                       request=request)

        # Create an initial counter record directly.
        u1 = ActionCounterUnique(content_type=obj_1_ct, object_pk=obj_1.pk,
                                 name=action_name, total=1, ip=ip,
                                 user_agent=user_agent, user=user)
        u1.save()

        # Adding a duplicate counter should be prevented at the model level.
        # BUG FIX: the original placed ok_(False, ...) inside the try block
        # guarded by a bare `except: pass`, so the AssertionError raised by
        # ok_() was itself swallowed and this check could never fail. The
        # failure assertion now lives in the `else` clause, which only runs
        # when no exception (e.g. IntegrityError) was raised.
        try:
            u2 = ActionCounterUnique(content_type=obj_1_ct, object_pk=obj_1.pk,
                                     name=action_name, total=1, ip=ip,
                                     user_agent=user_agent, user=user)
            u2.save()
        except Exception:
            pass
        else:
            ok_(False, "This should have triggered an IntegrityError")

        # Try get_unique_for_request, which should turn up the single unique
        # record created earlier.
        try:
            (u, created) = ActionCounterUnique.objects.get_unique_for_request(
                obj_1, action_name, request)
            eq_(False, created)
        except MultipleObjectsReturned:
            ok_(False, "MultipleObjectsReturned should not be raised")

    def test_basic_action_increment(self):
        """Action attempted with several different kinds of unique identifiers"""
        obj_1 = self.obj_1

        # set up request for anonymous user #1
        request = self.mk_request()

        # anonymous user #1 likes user2
        obj_1.likes.increment(request)
        eq_(1, obj_1.likes.total)

        # anonymous user #1 likes user2, again
        obj_1.likes.increment(request)
        eq_(1, obj_1.likes.total)

        # set up request for anonymous user #2
        request = self.mk_request(ip='192.168.123.1')

        # anonymous user #2 likes user2
        obj_1.likes.increment(request)
        eq_(2, obj_1.likes.total)

        # anonymous user #2 likes user2, again
        obj_1.likes.increment(request)
        eq_(2, obj_1.likes.total)

        # set up request for authenticated user1
        request = self.mk_request(user=self.user1)

        # authenticated user1 likes user2
        obj_1.likes.increment(request)
        eq_(3, obj_1.likes.total)

        # authenticated user1 likes user2, again
        obj_1.likes.increment(request)
        eq_(3, obj_1.likes.total)

        # authenticated user1 likes user2, again, from another IP
        request.META['REMOTE_ADDR'] = '192.168.123.50'
        obj_1.likes.increment(request)
        eq_(3, obj_1.likes.total)

        # set up request for user agent Mozilla 1.0
        request = self.mk_request(ip='192.168.123.50',
                                  user_agent='Mozilla 1.0')
        obj_1.likes.increment(request)
        eq_(4, obj_1.likes.total)

        # set up request for user agent Safari 1.0
        request = self.mk_request(ip='192.168.123.50', user_agent='Safari 1.0')
        obj_1.likes.increment(request)
        eq_(5, obj_1.likes.total)

    def test_action_with_max(self):
        """Action with a max_total_per_unique greater than 1"""
        obj_1 = self.obj_1
        MAX = obj_1.views.field.max_total_per_unique
        request = self.mk_request(ip='192.168.123.123')
        # increments past MAX are silently clamped
        for x in range(1, MAX + 1):
            obj_1.views.increment(request)
            eq_(x, obj_1.views.total)
        obj_1.views.increment(request)
        eq_(MAX, obj_1.views.total)
        obj_1.views.increment(request)
        eq_(MAX, obj_1.views.total)

    def test_action_with_min(self):
        """Action with a min_total_per_unique greater than 1"""
        obj_1 = self.obj_1
        MIN = obj_1.frobs.field.min_total_per_unique
        request = self.mk_request(ip='192.168.123.123')
        # decrements past MIN are silently clamped
        for x in range(1, (0 - MIN) + 1):
            obj_1.frobs.decrement(request)
            eq_(0 - x, obj_1.frobs.total)
        obj_1.frobs.decrement(request)
        eq_(MIN, obj_1.frobs.total)
        obj_1.frobs.decrement(request)
        eq_(MIN, obj_1.frobs.total)

    def test_action_count_per_unique(self):
        """Exercise action counts per unique and ensure overall total works"""
        obj_1 = self.obj_1
        MAX = obj_1.boogs.field.max_total_per_unique
        MIN = obj_1.boogs.field.min_total_per_unique
        UNIQUES = (
            dict(user=self.user1),
            dict(user=self.user2),
            dict(ip='192.168.123.123'),
            dict(ip='192.168.123.150', user_agent="Safari 1.0"),
            dict(ip='192.168.123.150', user_agent="Mozilla 1.0"),
            dict(ip='192.168.123.160'),
        )
        # each unique visitor can contribute at most MAX to the total
        for unique in UNIQUES:
            request = self.mk_request(**unique)
            for x in range(1, MAX + 1):
                obj_1.boogs.increment(request)
                eq_(x, obj_1.boogs.get_total_for_request(request))
            obj_1.boogs.increment(request)
            obj_1.boogs.increment(request)
            eq_(MAX, obj_1.boogs.get_total_for_request(request))
        eq_(MAX * len(UNIQUES), obj_1.boogs.total)

        # Undo all the increments before going below zero
        for unique in UNIQUES:
            request = self.mk_request(**unique)
            for x in range(1, MAX + 1):
                obj_1.boogs.decrement(request)

        # each unique visitor can contribute at most MIN to the total
        for unique in UNIQUES:
            request = self.mk_request(**unique)
            for x in range(1, (0 - MIN) + 1):
                obj_1.boogs.decrement(request)
                eq_(0 - x, obj_1.boogs.get_total_for_request(request))
            obj_1.boogs.decrement(request)
            obj_1.boogs.decrement(request)
            eq_(MIN, obj_1.boogs.get_total_for_request(request))
        eq_(MIN * len(UNIQUES), obj_1.boogs.total)

    def test_count_starts_at_zero(self):
        """
        Make sure initial count is zero.
        Sounds dumb, but it was a bug at one point.
        """
        request = self.mk_request()
        eq_(0, self.obj_1.likes.get_total_for_request(request))
| mpl-2.0 |
jbassen/edx-platform | cms/djangoapps/contentstore/management/commands/empty_asset_trashcan.py | 206 | 1134 | from django.core.management.base import BaseCommand, CommandError
from xmodule.contentstore.utils import empty_asset_trashcan
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.keys import CourseKey
from .prompt import query_yes_no
from opaque_keys import InvalidKeyError
from opaque_keys.edx.locations import SlashSeparatedCourseKey
class Command(BaseCommand):
    help = '''Empty the trashcan. Can pass an optional course_id to limit the damage.'''

    def handle(self, *args, **options):
        """Empty the asset trashcan for one course, or for every course."""
        if len(args) not in (0, 1):
            raise CommandError("empty_asset_trashcan requires one or no arguments: |<course_id>|")

        if args:
            # a single course id was given; accept new- or old-style keys
            try:
                course_key = CourseKey.from_string(args[0])
            except InvalidKeyError:
                course_key = SlashSeparatedCourseKey.from_deprecated_string(args[0])
            course_ids = [course_key]
        else:
            # no argument: operate on every course in the modulestore
            course_ids = [course.id for course in modulestore().get_courses()]

        if query_yes_no("Emptying trashcan. Confirm?", default="no"):
            empty_asset_trashcan(course_ids)
| agpl-3.0 |
andim27/magiccamp | build/lib/django/contrib/auth/backends.py | 33 | 4327 | from django.db import connection
from django.contrib.auth.models import User, Permission
class ModelBackend(object):
    """
    Authenticates against django.contrib.auth.models.User.
    """
    # capability flags inspected by the django.contrib.auth machinery
    supports_object_permissions = False
    supports_anonymous_user = True

    # TODO: Model, login attribute name and password attribute name should be
    # configurable.
    def authenticate(self, username=None, password=None):
        """Returns the User matching username/password, or None on any
        failure (unknown username or wrong password)."""
        try:
            user = User.objects.get(username=username)
            if user.check_password(password):
                return user
        except User.DoesNotExist:
            return None

    def get_group_permissions(self, user_obj):
        """
        Returns a set of permission strings that this user has through his/her
        groups.
        """
        if not hasattr(user_obj, '_group_perm_cache'):
            # cache "app_label.codename" strings on the user object so this
            # query runs at most once per user instance
            perms = Permission.objects.filter(group__user=user_obj
                ).values_list('content_type__app_label', 'codename'
                ).order_by()
            user_obj._group_perm_cache = set(["%s.%s" % (ct, name) for ct, name in perms])
        return user_obj._group_perm_cache

    def get_all_permissions(self, user_obj):
        """Returns the union of the user's own and group permissions,
        cached on the user object."""
        if user_obj.is_anonymous():
            return set()
        if not hasattr(user_obj, '_perm_cache'):
            user_obj._perm_cache = set([u"%s.%s" % (p.content_type.app_label, p.codename) for p in user_obj.user_permissions.select_related()])
            user_obj._perm_cache.update(self.get_group_permissions(user_obj))
        return user_obj._perm_cache

    def has_perm(self, user_obj, perm):
        """Returns True if the user holds the "app_label.codename" perm."""
        return perm in self.get_all_permissions(user_obj)

    def has_module_perms(self, user_obj, app_label):
        """
        Returns True if user_obj has any permissions in the given app_label.
        """
        for perm in self.get_all_permissions(user_obj):
            # permission strings are "app_label.codename"
            if perm[:perm.index('.')] == app_label:
                return True
        return False

    def get_user(self, user_id):
        """Returns the User with the given primary key, or None."""
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None
class RemoteUserBackend(ModelBackend):
    """
    This backend is to be used in conjunction with the ``RemoteUserMiddleware``
    found in the middleware module of this package, and is used when the server
    is handling authentication outside of Django.

    By default, the ``authenticate`` method creates ``User`` objects for
    usernames that don't already exist in the database.  Subclasses can disable
    this behavior by setting the ``create_unknown_user`` attribute to
    ``False``.
    """
    # Create a User object if not already in the database?
    create_unknown_user = True

    def authenticate(self, remote_user):
        """
        The username passed as ``remote_user`` is considered trusted.  This
        method simply returns the ``User`` object with the given username,
        creating a new ``User`` object if ``create_unknown_user`` is ``True``.

        Returns None if ``create_unknown_user`` is ``False`` and a ``User``
        object with the given username is not found in the database.
        """
        if not remote_user:
            return
        username = self.clean_username(remote_user)

        if not self.create_unknown_user:
            # Only look up existing users; a missing user means no login.
            try:
                return User.objects.get(username=username)
            except User.DoesNotExist:
                return None

        # get_or_create has built-in safeguards for multiple threads, which is
        # why it is preferred here over a manual try/except creation dance.
        user, created = User.objects.get_or_create(username=username)
        if created:
            user = self.configure_user(user)
        return user

    def clean_username(self, username):
        """
        Performs any cleaning on the "username" prior to using it to get or
        create the user object.  Returns the cleaned username.

        By default, returns the username unchanged.
        """
        return username

    def configure_user(self, user):
        """
        Configures a user after creation and returns the updated user.

        By default, returns the user unmodified.
        """
        return user
| bsd-3-clause |
chuan9/chromium-crosswalk | build/android/pylib/utils/json_results_generator_unittest.py | 87 | 7184 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Most of this file was ported over from Blink's
# webkitpy/layout_tests/layout_package/json_results_generator_unittest.py
#
import unittest
import json
from pylib.utils import json_results_generator
class JSONGeneratorTest(unittest.TestCase):
  """Exercises json_results_generator: incremental JSON output, the
  FIXABLE/TESTS bookkeeping, and the test-timings trie builder.

  WriteJSON is stubbed out for the duration of each test so nothing is
  written to disk.
  """

  def setUp(self):
    self.builder_name = 'DUMMY_BUILDER_NAME'
    self.build_name = 'DUMMY_BUILD_NAME'
    self.build_number = 'DUMMY_BUILDER_NUMBER'

    # For archived results.
    self._json = None
    self._num_runs = 0
    self._tests_set = set([])
    self._test_timings = {}
    self._failed_count_map = {}

    self._PASS_count = 0
    self._DISABLED_count = 0
    self._FLAKY_count = 0
    self._FAILS_count = 0
    self._fixable_count = 0

    # Stub out file writing; restored in tearDown.
    self._orig_write_json = json_results_generator.WriteJSON

    # unused arguments ... pylint: disable=W0613
    def _WriteJSONStub(json_object, file_path, callback=None):
      pass

    json_results_generator.WriteJSON = _WriteJSONStub

  def tearDown(self):
    # Restore the real writer stubbed out in setUp.
    json_results_generator.WriteJSON = self._orig_write_json

  def _TestJSONGeneration(self, passed_tests_list, failed_tests_list):
    """Builds a results map from the given test names, generates JSON for it
    and verifies the result.

    Test names prefixed DISABLED_/FLAKY_/FAILS_ are bucketed accordingly;
    everything else counts as a plain PASS.
    """
    tests_set = set(passed_tests_list) | set(failed_tests_list)

    DISABLED_tests = set([t for t in tests_set
                          if t.startswith('DISABLED_')])
    FLAKY_tests = set([t for t in tests_set
                       if t.startswith('FLAKY_')])
    FAILS_tests = set([t for t in tests_set
                       if t.startswith('FAILS_')])
    PASS_tests = tests_set - (DISABLED_tests | FLAKY_tests | FAILS_tests)

    # DISABLED tests never count as failures, even if listed as failed.
    failed_tests = set(failed_tests_list) - DISABLED_tests
    failed_count_map = dict([(t, 1) for t in failed_tests])

    # Assign each test a distinct, deterministic elapsed time so timing
    # entries can be matched up in _VerifyJSONResults.
    test_timings = {}
    i = 0
    for test in tests_set:
      test_timings[test] = float(self._num_runs * 100 + i)
      i += 1

    test_results_map = dict()
    for test in tests_set:
      test_results_map[test] = json_results_generator.TestResult(
          test, failed=(test in failed_tests),
          elapsed_time=test_timings[test])

    generator = json_results_generator.JSONResultsGeneratorBase(
        self.builder_name, self.build_name, self.build_number,
        '',
        None,  # don't fetch past json results archive
        test_results_map)

    # Test incremental json results
    incremental_json = generator.GetJSON()
    self._VerifyJSONResults(
        tests_set,
        test_timings,
        failed_count_map,
        len(PASS_tests),
        len(DISABLED_tests),
        len(FLAKY_tests),
        len(DISABLED_tests | failed_tests),
        incremental_json,
        1)

    # We don't verify the results here, but at least we make sure the code
    # runs without errors.
    generator.GenerateJSONOutput()
    generator.GenerateTimesMSFile()

  def _VerifyJSONResults(self, tests_set, test_timings, failed_count_map,
                         PASS_count, DISABLED_count, FLAKY_count,
                         fixable_count, json_obj, num_runs):
    """Asserts that json_obj contains the expected per-result-type counts,
    per-test failure counts and timings, and the fixable-count total."""
    # Aliasing to a short name for better access to its constants.
    JRG = json_results_generator.JSONResultsGeneratorBase

    self.assertIn(JRG.VERSION_KEY, json_obj)
    self.assertIn(self.builder_name, json_obj)

    buildinfo = json_obj[self.builder_name]
    self.assertIn(JRG.FIXABLE, buildinfo)
    self.assertIn(JRG.TESTS, buildinfo)
    self.assertEqual(len(buildinfo[JRG.BUILD_NUMBERS]), num_runs)
    self.assertEqual(buildinfo[JRG.BUILD_NUMBERS][0], self.build_number)

    if tests_set or DISABLED_count:
      # Sum the per-run FIXABLE dicts into one {result_type: count} map.
      fixable = {}
      for fixable_items in buildinfo[JRG.FIXABLE]:
        for (result_type, count) in fixable_items.iteritems():
          if result_type in fixable:
            fixable[result_type] = fixable[result_type] + count
          else:
            fixable[result_type] = count

      # A result type with an expected count of zero may be absent entirely.
      if PASS_count:
        self.assertEqual(fixable[JRG.PASS_RESULT], PASS_count)
      else:
        self.assertTrue(JRG.PASS_RESULT not in fixable or
                        fixable[JRG.PASS_RESULT] == 0)

      if DISABLED_count:
        self.assertEqual(fixable[JRG.SKIP_RESULT], DISABLED_count)
      else:
        self.assertTrue(JRG.SKIP_RESULT not in fixable or
                        fixable[JRG.SKIP_RESULT] == 0)

      if FLAKY_count:
        self.assertEqual(fixable[JRG.FLAKY_RESULT], FLAKY_count)
      else:
        self.assertTrue(JRG.FLAKY_RESULT not in fixable or
                        fixable[JRG.FLAKY_RESULT] == 0)

    if failed_count_map:
      tests = buildinfo[JRG.TESTS]
      for test_name in failed_count_map.iterkeys():
        test = self._FindTestInTrie(test_name, tests)

        failed = 0
        for result in test[JRG.RESULTS]:
          if result[1] == JRG.FAIL_RESULT:
            failed += result[0]
        self.assertEqual(failed_count_map[test_name], failed)

        # Exactly one timing entry should match this test's elapsed time.
        timing_count = 0
        for timings in test[JRG.TIMES]:
          if timings[1] == test_timings[test_name]:
            timing_count = timings[0]
        self.assertEqual(1, timing_count)

    if fixable_count:
      self.assertEqual(sum(buildinfo[JRG.FIXABLE_COUNT]), fixable_count)

  def _FindTestInTrie(self, path, trie):
    """Walks the '/'-separated path down the test trie and returns the node,
    asserting that every path component exists."""
    nodes = path.split('/')
    sub_trie = trie
    for node in nodes:
      self.assertIn(node, sub_trie)
      sub_trie = sub_trie[node]
    return sub_trie

  def testJSONGeneration(self):
    self._TestJSONGeneration([], [])
    self._TestJSONGeneration(['A1', 'B1'], [])
    self._TestJSONGeneration([], ['FAILS_A2', 'FAILS_B2'])
    self._TestJSONGeneration(['DISABLED_A3', 'DISABLED_B3'], [])
    self._TestJSONGeneration(['A4'], ['B4', 'FAILS_C4'])
    self._TestJSONGeneration(['DISABLED_C5', 'DISABLED_D5'], ['A5', 'B5'])
    self._TestJSONGeneration(
        ['A6', 'B6', 'FAILS_C6', 'DISABLED_E6', 'DISABLED_F6'],
        ['FAILS_D6'])

    # Generate JSON with the same test sets. (Both incremental results and
    # archived results must be updated appropriately.)
    self._TestJSONGeneration(
        ['A', 'FLAKY_B', 'DISABLED_C'],
        ['FAILS_D', 'FLAKY_E'])
    self._TestJSONGeneration(
        ['A', 'DISABLED_C', 'FLAKY_E'],
        ['FLAKY_B', 'FAILS_D'])
    self._TestJSONGeneration(
        ['FLAKY_B', 'DISABLED_C', 'FAILS_D'],
        ['A', 'FLAKY_E'])

  def testHierarchicalJSNGeneration(self):
    # FIXME: Re-work tests to be more comprehensible and comprehensive.
    self._TestJSONGeneration(['foo/A'], ['foo/B', 'bar/C'])

  def testTestTimingsTrie(self):
    """Timings trie nests by path component and stores times in whole ms."""
    individual_test_timings = []
    individual_test_timings.append(
        json_results_generator.TestResult(
            'foo/bar/baz.html',
            elapsed_time=1.2))
    individual_test_timings.append(
        json_results_generator.TestResult('bar.html', elapsed_time=0.0001))
    trie = json_results_generator.TestTimingsTrie(individual_test_timings)

    expected_trie = {
        'bar.html': 0,
        'foo': {
            'bar': {
                'baz.html': 1200,
            }
        }
    }

    self.assertEqual(json.dumps(trie), json.dumps(expected_trie))
| bsd-3-clause |
ecrespo/django_kanban-agile | kanban/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/euckrfreq.py | 3121 | 45978 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials include literature and computer technology
# 128 --> 0.79
# 256 --> 0.92
# 512 --> 0.986
# 1024 --> 0.99944
# 2048 --> 0.99999
#
# Idea Distribution Ratio = 0.98653 / (1-0.98653) = 73.24
# Random Distribution Ration = 512 / (2350-512) = 0.279.
#
# Typical Distribution Ratio
# Distribution-ratio threshold used by the EUC-KR analyser (derivation in the
# sampling comments above).
EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0
# Number of characters covered by the char-to-frequency-order table below.
EUCKR_TABLE_SIZE = 2352
# Char to FreqOrder table ,
EUCKRCharToFreqOrder = ( \
13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722, 87,
1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398,
1399,1729,1730,1731, 141, 621, 326,1057, 368,1732, 267, 488, 20,1733,1269,1734,
945,1400,1735, 47, 904,1270,1736,1737, 773, 248,1738, 409, 313, 786, 429,1739,
116, 987, 813,1401, 683, 75,1204, 145,1740,1741,1742,1743, 16, 847, 667, 622,
708,1744,1745,1746, 966, 787, 304, 129,1747, 60, 820, 123, 676,1748,1749,1750,
1751, 617,1752, 626,1753,1754,1755,1756, 653,1757,1758,1759,1760,1761,1762, 856,
344,1763,1764,1765,1766, 89, 401, 418, 806, 905, 848,1767,1768,1769, 946,1205,
709,1770,1118,1771, 241,1772,1773,1774,1271,1775, 569,1776, 999,1777,1778,1779,
1780, 337, 751,1058, 28, 628, 254,1781, 177, 906, 270, 349, 891,1079,1782, 19,
1783, 379,1784, 315,1785, 629, 754,1402, 559,1786, 636, 203,1206,1787, 710, 567,
1788, 935, 814,1789,1790,1207, 766, 528,1791,1792,1208,1793,1794,1795,1796,1797,
1403,1798,1799, 533,1059,1404,1405,1156,1406, 936, 884,1080,1800, 351,1801,1802,
1803,1804,1805, 801,1806,1807,1808,1119,1809,1157, 714, 474,1407,1810, 298, 899,
885,1811,1120, 802,1158,1812, 892,1813,1814,1408, 659,1815,1816,1121,1817,1818,
1819,1820,1821,1822, 319,1823, 594, 545,1824, 815, 937,1209,1825,1826, 573,1409,
1022,1827,1210,1828,1829,1830,1831,1832,1833, 556, 722, 807,1122,1060,1834, 697,
1835, 900, 557, 715,1836,1410, 540,1411, 752,1159, 294, 597,1211, 976, 803, 770,
1412,1837,1838, 39, 794,1413, 358,1839, 371, 925,1840, 453, 661, 788, 531, 723,
544,1023,1081, 869, 91,1841, 392, 430, 790, 602,1414, 677,1082, 457,1415,1416,
1842,1843, 475, 327,1024,1417, 795, 121,1844, 733, 403,1418,1845,1846,1847, 300,
119, 711,1212, 627,1848,1272, 207,1849,1850, 796,1213, 382,1851, 519,1852,1083,
893,1853,1854,1855, 367, 809, 487, 671,1856, 663,1857,1858, 956, 471, 306, 857,
1859,1860,1160,1084,1861,1862,1863,1864,1865,1061,1866,1867,1868,1869,1870,1871,
282, 96, 574,1872, 502,1085,1873,1214,1874, 907,1875,1876, 827, 977,1419,1420,
1421, 268,1877,1422,1878,1879,1880, 308,1881, 2, 537,1882,1883,1215,1884,1885,
127, 791,1886,1273,1423,1887, 34, 336, 404, 643,1888, 571, 654, 894, 840,1889,
0, 886,1274, 122, 575, 260, 908, 938,1890,1275, 410, 316,1891,1892, 100,1893,
1894,1123, 48,1161,1124,1025,1895, 633, 901,1276,1896,1897, 115, 816,1898, 317,
1899, 694,1900, 909, 734,1424, 572, 866,1425, 691, 85, 524,1010, 543, 394, 841,
1901,1902,1903,1026,1904,1905,1906,1907,1908,1909, 30, 451, 651, 988, 310,1910,
1911,1426, 810,1216, 93,1912,1913,1277,1217,1914, 858, 759, 45, 58, 181, 610,
269,1915,1916, 131,1062, 551, 443,1000, 821,1427, 957, 895,1086,1917,1918, 375,
1919, 359,1920, 687,1921, 822,1922, 293,1923,1924, 40, 662, 118, 692, 29, 939,
887, 640, 482, 174,1925, 69,1162, 728,1428, 910,1926,1278,1218,1279, 386, 870,
217, 854,1163, 823,1927,1928,1929,1930, 834,1931, 78,1932, 859,1933,1063,1934,
1935,1936,1937, 438,1164, 208, 595,1938,1939,1940,1941,1219,1125,1942, 280, 888,
1429,1430,1220,1431,1943,1944,1945,1946,1947,1280, 150, 510,1432,1948,1949,1950,
1951,1952,1953,1954,1011,1087,1955,1433,1043,1956, 881,1957, 614, 958,1064,1065,
1221,1958, 638,1001, 860, 967, 896,1434, 989, 492, 553,1281,1165,1959,1282,1002,
1283,1222,1960,1961,1962,1963, 36, 383, 228, 753, 247, 454,1964, 876, 678,1965,
1966,1284, 126, 464, 490, 835, 136, 672, 529, 940,1088,1435, 473,1967,1968, 467,
50, 390, 227, 587, 279, 378, 598, 792, 968, 240, 151, 160, 849, 882,1126,1285,
639,1044, 133, 140, 288, 360, 811, 563,1027, 561, 142, 523,1969,1970,1971, 7,
103, 296, 439, 407, 506, 634, 990,1972,1973,1974,1975, 645,1976,1977,1978,1979,
1980,1981, 236,1982,1436,1983,1984,1089, 192, 828, 618, 518,1166, 333,1127,1985,
818,1223,1986,1987,1988,1989,1990,1991,1992,1993, 342,1128,1286, 746, 842,1994,
1995, 560, 223,1287, 98, 8, 189, 650, 978,1288,1996,1437,1997, 17, 345, 250,
423, 277, 234, 512, 226, 97, 289, 42, 167,1998, 201,1999,2000, 843, 836, 824,
532, 338, 783,1090, 182, 576, 436,1438,1439, 527, 500,2001, 947, 889,2002,2003,
2004,2005, 262, 600, 314, 447,2006, 547,2007, 693, 738,1129,2008, 71,1440, 745,
619, 688,2009, 829,2010,2011, 147,2012, 33, 948,2013,2014, 74, 224,2015, 61,
191, 918, 399, 637,2016,1028,1130, 257, 902,2017,2018,2019,2020,2021,2022,2023,
2024,2025,2026, 837,2027,2028,2029,2030, 179, 874, 591, 52, 724, 246,2031,2032,
2033,2034,1167, 969,2035,1289, 630, 605, 911,1091,1168,2036,2037,2038,1441, 912,
2039, 623,2040,2041, 253,1169,1290,2042,1442, 146, 620, 611, 577, 433,2043,1224,
719,1170, 959, 440, 437, 534, 84, 388, 480,1131, 159, 220, 198, 679,2044,1012,
819,1066,1443, 113,1225, 194, 318,1003,1029,2045,2046,2047,2048,1067,2049,2050,
2051,2052,2053, 59, 913, 112,2054, 632,2055, 455, 144, 739,1291,2056, 273, 681,
499,2057, 448,2058,2059, 760,2060,2061, 970, 384, 169, 245,1132,2062,2063, 414,
1444,2064,2065, 41, 235,2066, 157, 252, 877, 568, 919, 789, 580,2067, 725,2068,
2069,1292,2070,2071,1445,2072,1446,2073,2074, 55, 588, 66,1447, 271,1092,2075,
1226,2076, 960,1013, 372,2077,2078,2079,2080,2081,1293,2082,2083,2084,2085, 850,
2086,2087,2088,2089,2090, 186,2091,1068, 180,2092,2093,2094, 109,1227, 522, 606,
2095, 867,1448,1093, 991,1171, 926, 353,1133,2096, 581,2097,2098,2099,1294,1449,
1450,2100, 596,1172,1014,1228,2101,1451,1295,1173,1229,2102,2103,1296,1134,1452,
949,1135,2104,2105,1094,1453,1454,1455,2106,1095,2107,2108,2109,2110,2111,2112,
2113,2114,2115,2116,2117, 804,2118,2119,1230,1231, 805,1456, 405,1136,2120,2121,
2122,2123,2124, 720, 701,1297, 992,1457, 927,1004,2125,2126,2127,2128,2129,2130,
22, 417,2131, 303,2132, 385,2133, 971, 520, 513,2134,1174, 73,1096, 231, 274,
962,1458, 673,2135,1459,2136, 152,1137,2137,2138,2139,2140,1005,1138,1460,1139,
2141,2142,2143,2144, 11, 374, 844,2145, 154,1232, 46,1461,2146, 838, 830, 721,
1233, 106,2147, 90, 428, 462, 578, 566,1175, 352,2148,2149, 538,1234, 124,1298,
2150,1462, 761, 565,2151, 686,2152, 649,2153, 72, 173,2154, 460, 415,2155,1463,
2156,1235, 305,2157,2158,2159,2160,2161,2162, 579,2163,2164,2165,2166,2167, 747,
2168,2169,2170,2171,1464, 669,2172,2173,2174,2175,2176,1465,2177, 23, 530, 285,
2178, 335, 729,2179, 397,2180,2181,2182,1030,2183,2184, 698,2185,2186, 325,2187,
2188, 369,2189, 799,1097,1015, 348,2190,1069, 680,2191, 851,1466,2192,2193, 10,
2194, 613, 424,2195, 979, 108, 449, 589, 27, 172, 81,1031, 80, 774, 281, 350,
1032, 525, 301, 582,1176,2196, 674,1045,2197,2198,1467, 730, 762,2199,2200,2201,
2202,1468,2203, 993,2204,2205, 266,1070, 963,1140,2206,2207,2208, 664,1098, 972,
2209,2210,2211,1177,1469,1470, 871,2212,2213,2214,2215,2216,1471,2217,2218,2219,
2220,2221,2222,2223,2224,2225,2226,2227,1472,1236,2228,2229,2230,2231,2232,2233,
2234,2235,1299,2236,2237, 200,2238, 477, 373,2239,2240, 731, 825, 777,2241,2242,
2243, 521, 486, 548,2244,2245,2246,1473,1300, 53, 549, 137, 875, 76, 158,2247,
1301,1474, 469, 396,1016, 278, 712,2248, 321, 442, 503, 767, 744, 941,1237,1178,
1475,2249, 82, 178,1141,1179, 973,2250,1302,2251, 297,2252,2253, 570,2254,2255,
2256, 18, 450, 206,2257, 290, 292,1142,2258, 511, 162, 99, 346, 164, 735,2259,
1476,1477, 4, 554, 343, 798,1099,2260,1100,2261, 43, 171,1303, 139, 215,2262,
2263, 717, 775,2264,1033, 322, 216,2265, 831,2266, 149,2267,1304,2268,2269, 702,
1238, 135, 845, 347, 309,2270, 484,2271, 878, 655, 238,1006,1478,2272, 67,2273,
295,2274,2275, 461,2276, 478, 942, 412,2277,1034,2278,2279,2280, 265,2281, 541,
2282,2283,2284,2285,2286, 70, 852,1071,2287,2288,2289,2290, 21, 56, 509, 117,
432,2291,2292, 331, 980, 552,1101, 148, 284, 105, 393,1180,1239, 755,2293, 187,
2294,1046,1479,2295, 340,2296, 63,1047, 230,2297,2298,1305, 763,1306, 101, 800,
808, 494,2299,2300,2301, 903,2302, 37,1072, 14, 5,2303, 79, 675,2304, 312,
2305,2306,2307,2308,2309,1480, 6,1307,2310,2311,2312, 1, 470, 35, 24, 229,
2313, 695, 210, 86, 778, 15, 784, 592, 779, 32, 77, 855, 964,2314, 259,2315,
501, 380,2316,2317, 83, 981, 153, 689,1308,1481,1482,1483,2318,2319, 716,1484,
2320,2321,2322,2323,2324,2325,1485,2326,2327, 128, 57, 68, 261,1048, 211, 170,
1240, 31,2328, 51, 435, 742,2329,2330,2331, 635,2332, 264, 456,2333,2334,2335,
425,2336,1486, 143, 507, 263, 943,2337, 363, 920,1487, 256,1488,1102, 243, 601,
1489,2338,2339,2340,2341,2342,2343,2344, 861,2345,2346,2347,2348,2349,2350, 395,
2351,1490,1491, 62, 535, 166, 225,2352,2353, 668, 419,1241, 138, 604, 928,2354,
1181,2355,1492,1493,2356,2357,2358,1143,2359, 696,2360, 387, 307,1309, 682, 476,
2361,2362, 332, 12, 222, 156,2363, 232,2364, 641, 276, 656, 517,1494,1495,1035,
416, 736,1496,2365,1017, 586,2366,2367,2368,1497,2369, 242,2370,2371,2372,1498,
2373, 965, 713,2374,2375,2376,2377, 740, 982,1499, 944,1500,1007,2378,2379,1310,
1501,2380,2381,2382, 785, 329,2383,2384,1502,2385,2386,2387, 932,2388,1503,2389,
2390,2391,2392,1242,2393,2394,2395,2396,2397, 994, 950,2398,2399,2400,2401,1504,
1311,2402,2403,2404,2405,1049, 749,2406,2407, 853, 718,1144,1312,2408,1182,1505,
2409,2410, 255, 516, 479, 564, 550, 214,1506,1507,1313, 413, 239, 444, 339,1145,
1036,1508,1509,1314,1037,1510,1315,2411,1511,2412,2413,2414, 176, 703, 497, 624,
593, 921, 302,2415, 341, 165,1103,1512,2416,1513,2417,2418,2419, 376,2420, 700,
2421,2422,2423, 258, 768,1316,2424,1183,2425, 995, 608,2426,2427,2428,2429, 221,
2430,2431,2432,2433,2434,2435,2436,2437, 195, 323, 726, 188, 897, 983,1317, 377,
644,1050, 879,2438, 452,2439,2440,2441,2442,2443,2444, 914,2445,2446,2447,2448,
915, 489,2449,1514,1184,2450,2451, 515, 64, 427, 495,2452, 583,2453, 483, 485,
1038, 562, 213,1515, 748, 666,2454,2455,2456,2457, 334,2458, 780, 996,1008, 705,
1243,2459,2460,2461,2462,2463, 114,2464, 493,1146, 366, 163,1516, 961,1104,2465,
291,2466,1318,1105,2467,1517, 365,2468, 355, 951,1244,2469,1319,2470, 631,2471,
2472, 218,1320, 364, 320, 756,1518,1519,1321,1520,1322,2473,2474,2475,2476, 997,
2477,2478,2479,2480, 665,1185,2481, 916,1521,2482,2483,2484, 584, 684,2485,2486,
797,2487,1051,1186,2488,2489,2490,1522,2491,2492, 370,2493,1039,1187, 65,2494,
434, 205, 463,1188,2495, 125, 812, 391, 402, 826, 699, 286, 398, 155, 781, 771,
585,2496, 590, 505,1073,2497, 599, 244, 219, 917,1018, 952, 646,1523,2498,1323,
2499,2500, 49, 984, 354, 741,2501, 625,2502,1324,2503,1019, 190, 357, 757, 491,
95, 782, 868,2504,2505,2506,2507,2508,2509, 134,1524,1074, 422,1525, 898,2510,
161,2511,2512,2513,2514, 769,2515,1526,2516,2517, 411,1325,2518, 472,1527,2519,
2520,2521,2522,2523,2524, 985,2525,2526,2527,2528,2529,2530, 764,2531,1245,2532,
2533, 25, 204, 311,2534, 496,2535,1052,2536,2537,2538,2539,2540,2541,2542, 199,
704, 504, 468, 758, 657,1528, 196, 44, 839,1246, 272, 750,2543, 765, 862,2544,
2545,1326,2546, 132, 615, 933,2547, 732,2548,2549,2550,1189,1529,2551, 283,1247,
1053, 607, 929,2552,2553,2554, 930, 183, 872, 616,1040,1147,2555,1148,1020, 441,
249,1075,2556,2557,2558, 466, 743,2559,2560,2561, 92, 514, 426, 420, 526,2562,
2563,2564,2565,2566,2567,2568, 185,2569,2570,2571,2572, 776,1530, 658,2573, 362,
2574, 361, 922,1076, 793,2575,2576,2577,2578,2579,2580,1531, 251,2581,2582,2583,
2584,1532, 54, 612, 237,1327,2585,2586, 275, 408, 647, 111,2587,1533,1106, 465,
3, 458, 9, 38,2588, 107, 110, 890, 209, 26, 737, 498,2589,1534,2590, 431,
202, 88,1535, 356, 287,1107, 660,1149,2591, 381,1536, 986,1150, 445,1248,1151,
974,2592,2593, 846,2594, 446, 953, 184,1249,1250, 727,2595, 923, 193, 883,2596,
2597,2598, 102, 324, 539, 817,2599, 421,1041,2600, 832,2601, 94, 175, 197, 406,
2602, 459,2603,2604,2605,2606,2607, 330, 555,2608,2609,2610, 706,1108, 389,2611,
2612,2613,2614, 233,2615, 833, 558, 931, 954,1251,2616,2617,1537, 546,2618,2619,
1009,2620,2621,2622,1538, 690,1328,2623, 955,2624,1539,2625,2626, 772,2627,2628,
2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042,
670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642, # 512, 256
#Everything below is of no interest for detection purpose
2643,2644,2645,2646,2647,2648,2649,2650,2651,2652,2653,2654,2655,2656,2657,2658,
2659,2660,2661,2662,2663,2664,2665,2666,2667,2668,2669,2670,2671,2672,2673,2674,
2675,2676,2677,2678,2679,2680,2681,2682,2683,2684,2685,2686,2687,2688,2689,2690,
2691,2692,2693,2694,2695,2696,2697,2698,2699,1542, 880,2700,2701,2702,2703,2704,
2705,2706,2707,2708,2709,2710,2711,2712,2713,2714,2715,2716,2717,2718,2719,2720,
2721,2722,2723,2724,2725,1543,2726,2727,2728,2729,2730,2731,2732,1544,2733,2734,
2735,2736,2737,2738,2739,2740,2741,2742,2743,2744,2745,2746,2747,2748,2749,2750,
2751,2752,2753,2754,1545,2755,2756,2757,2758,2759,2760,2761,2762,2763,2764,2765,
2766,1546,2767,1547,2768,2769,2770,2771,2772,2773,2774,2775,2776,2777,2778,2779,
2780,2781,2782,2783,2784,2785,2786,1548,2787,2788,2789,1109,2790,2791,2792,2793,
2794,2795,2796,2797,2798,2799,2800,2801,2802,2803,2804,2805,2806,2807,2808,2809,
2810,2811,2812,1329,2813,2814,2815,2816,2817,2818,2819,2820,2821,2822,2823,2824,
2825,2826,2827,2828,2829,2830,2831,2832,2833,2834,2835,2836,2837,2838,2839,2840,
2841,2842,2843,2844,2845,2846,2847,2848,2849,2850,2851,2852,2853,2854,2855,2856,
1549,2857,2858,2859,2860,1550,2861,2862,1551,2863,2864,2865,2866,2867,2868,2869,
2870,2871,2872,2873,2874,1110,1330,2875,2876,2877,2878,2879,2880,2881,2882,2883,
2884,2885,2886,2887,2888,2889,2890,2891,2892,2893,2894,2895,2896,2897,2898,2899,
2900,2901,2902,2903,2904,2905,2906,2907,2908,2909,2910,2911,2912,2913,2914,2915,
2916,2917,2918,2919,2920,2921,2922,2923,2924,2925,2926,2927,2928,2929,2930,1331,
2931,2932,2933,2934,2935,2936,2937,2938,2939,2940,2941,2942,2943,1552,2944,2945,
2946,2947,2948,2949,2950,2951,2952,2953,2954,2955,2956,2957,2958,2959,2960,2961,
2962,2963,2964,1252,2965,2966,2967,2968,2969,2970,2971,2972,2973,2974,2975,2976,
2977,2978,2979,2980,2981,2982,2983,2984,2985,2986,2987,2988,2989,2990,2991,2992,
2993,2994,2995,2996,2997,2998,2999,3000,3001,3002,3003,3004,3005,3006,3007,3008,
3009,3010,3011,3012,1553,3013,3014,3015,3016,3017,1554,3018,1332,3019,3020,3021,
3022,3023,3024,3025,3026,3027,3028,3029,3030,3031,3032,3033,3034,3035,3036,3037,
3038,3039,3040,3041,3042,3043,3044,3045,3046,3047,3048,3049,3050,1555,3051,3052,
3053,1556,1557,3054,3055,3056,3057,3058,3059,3060,3061,3062,3063,3064,3065,3066,
3067,1558,3068,3069,3070,3071,3072,3073,3074,3075,3076,1559,3077,3078,3079,3080,
3081,3082,3083,1253,3084,3085,3086,3087,3088,3089,3090,3091,3092,3093,3094,3095,
3096,3097,3098,3099,3100,3101,3102,3103,3104,3105,3106,3107,3108,1152,3109,3110,
3111,3112,3113,1560,3114,3115,3116,3117,1111,3118,3119,3120,3121,3122,3123,3124,
3125,3126,3127,3128,3129,3130,3131,3132,3133,3134,3135,3136,3137,3138,3139,3140,
3141,3142,3143,3144,3145,3146,3147,3148,3149,3150,3151,3152,3153,3154,3155,3156,
3157,3158,3159,3160,3161,3162,3163,3164,3165,3166,3167,3168,3169,3170,3171,3172,
3173,3174,3175,3176,1333,3177,3178,3179,3180,3181,3182,3183,3184,3185,3186,3187,
3188,3189,1561,3190,3191,1334,3192,3193,3194,3195,3196,3197,3198,3199,3200,3201,
3202,3203,3204,3205,3206,3207,3208,3209,3210,3211,3212,3213,3214,3215,3216,3217,
3218,3219,3220,3221,3222,3223,3224,3225,3226,3227,3228,3229,3230,3231,3232,3233,
3234,1562,3235,3236,3237,3238,3239,3240,3241,3242,3243,3244,3245,3246,3247,3248,
3249,3250,3251,3252,3253,3254,3255,3256,3257,3258,3259,3260,3261,3262,3263,3264,
3265,3266,3267,3268,3269,3270,3271,3272,3273,3274,3275,3276,3277,1563,3278,3279,
3280,3281,3282,3283,3284,3285,3286,3287,3288,3289,3290,3291,3292,3293,3294,3295,
3296,3297,3298,3299,3300,3301,3302,3303,3304,3305,3306,3307,3308,3309,3310,3311,
3312,3313,3314,3315,3316,3317,3318,3319,3320,3321,3322,3323,3324,3325,3326,3327,
3328,3329,3330,3331,3332,3333,3334,3335,3336,3337,3338,3339,3340,3341,3342,3343,
3344,3345,3346,3347,3348,3349,3350,3351,3352,3353,3354,3355,3356,3357,3358,3359,
3360,3361,3362,3363,3364,1335,3365,3366,3367,3368,3369,3370,3371,3372,3373,3374,
3375,3376,3377,3378,3379,3380,3381,3382,3383,3384,3385,3386,3387,1336,3388,3389,
3390,3391,3392,3393,3394,3395,3396,3397,3398,3399,3400,3401,3402,3403,3404,3405,
3406,3407,3408,3409,3410,3411,3412,3413,3414,1337,3415,3416,3417,3418,3419,1338,
3420,3421,3422,1564,1565,3423,3424,3425,3426,3427,3428,3429,3430,3431,1254,3432,
3433,3434,1339,3435,3436,3437,3438,3439,1566,3440,3441,3442,3443,3444,3445,3446,
3447,3448,3449,3450,3451,3452,3453,3454,1255,3455,3456,3457,3458,3459,1567,1191,
3460,1568,1569,3461,3462,3463,1570,3464,3465,3466,3467,3468,1571,3469,3470,3471,
3472,3473,1572,3474,3475,3476,3477,3478,3479,3480,3481,3482,3483,3484,3485,3486,
1340,3487,3488,3489,3490,3491,3492,1021,3493,3494,3495,3496,3497,3498,1573,3499,
1341,3500,3501,3502,3503,3504,3505,3506,3507,3508,3509,3510,3511,1342,3512,3513,
3514,3515,3516,1574,1343,3517,3518,3519,1575,3520,1576,3521,3522,3523,3524,3525,
3526,3527,3528,3529,3530,3531,3532,3533,3534,3535,3536,3537,3538,3539,3540,3541,
3542,3543,3544,3545,3546,3547,3548,3549,3550,3551,3552,3553,3554,3555,3556,3557,
3558,3559,3560,3561,3562,3563,3564,3565,3566,3567,3568,3569,3570,3571,3572,3573,
3574,3575,3576,3577,3578,3579,3580,1577,3581,3582,1578,3583,3584,3585,3586,3587,
3588,3589,3590,3591,3592,3593,3594,3595,3596,3597,3598,3599,3600,3601,3602,3603,
3604,1579,3605,3606,3607,3608,3609,3610,3611,3612,3613,3614,3615,3616,3617,3618,
3619,3620,3621,3622,3623,3624,3625,3626,3627,3628,3629,1580,3630,3631,1581,3632,
3633,3634,3635,3636,3637,3638,3639,3640,3641,3642,3643,3644,3645,3646,3647,3648,
3649,3650,3651,3652,3653,3654,3655,3656,1582,3657,3658,3659,3660,3661,3662,3663,
3664,3665,3666,3667,3668,3669,3670,3671,3672,3673,3674,3675,3676,3677,3678,3679,
3680,3681,3682,3683,3684,3685,3686,3687,3688,3689,3690,3691,3692,3693,3694,3695,
3696,3697,3698,3699,3700,1192,3701,3702,3703,3704,1256,3705,3706,3707,3708,1583,
1257,3709,3710,3711,3712,3713,3714,3715,3716,1584,3717,3718,3719,3720,3721,3722,
3723,3724,3725,3726,3727,3728,3729,3730,3731,3732,3733,3734,3735,3736,3737,3738,
3739,3740,3741,3742,3743,3744,3745,1344,3746,3747,3748,3749,3750,3751,3752,3753,
3754,3755,3756,1585,3757,3758,3759,3760,3761,3762,3763,3764,3765,3766,1586,3767,
3768,3769,3770,3771,3772,3773,3774,3775,3776,3777,3778,1345,3779,3780,3781,3782,
3783,3784,3785,3786,3787,3788,3789,3790,3791,3792,3793,3794,3795,1346,1587,3796,
3797,1588,3798,3799,3800,3801,3802,3803,3804,3805,3806,1347,3807,3808,3809,3810,
3811,1589,3812,3813,3814,3815,3816,3817,3818,3819,3820,3821,1590,3822,3823,1591,
1348,3824,3825,3826,3827,3828,3829,3830,1592,3831,3832,1593,3833,3834,3835,3836,
3837,3838,3839,3840,3841,3842,3843,3844,1349,3845,3846,3847,3848,3849,3850,3851,
3852,3853,3854,3855,3856,3857,3858,1594,3859,3860,3861,3862,3863,3864,3865,3866,
3867,3868,3869,1595,3870,3871,3872,3873,1596,3874,3875,3876,3877,3878,3879,3880,
3881,3882,3883,3884,3885,3886,1597,3887,3888,3889,3890,3891,3892,3893,3894,3895,
1598,3896,3897,3898,1599,1600,3899,1350,3900,1351,3901,3902,1352,3903,3904,3905,
3906,3907,3908,3909,3910,3911,3912,3913,3914,3915,3916,3917,3918,3919,3920,3921,
3922,3923,3924,1258,3925,3926,3927,3928,3929,3930,3931,1193,3932,1601,3933,3934,
3935,3936,3937,3938,3939,3940,3941,3942,3943,1602,3944,3945,3946,3947,3948,1603,
3949,3950,3951,3952,3953,3954,3955,3956,3957,3958,3959,3960,3961,3962,3963,3964,
3965,1604,3966,3967,3968,3969,3970,3971,3972,3973,3974,3975,3976,3977,1353,3978,
3979,3980,3981,3982,3983,3984,3985,3986,3987,3988,3989,3990,3991,1354,3992,3993,
3994,3995,3996,3997,3998,3999,4000,4001,4002,4003,4004,4005,4006,4007,4008,4009,
4010,4011,4012,4013,4014,4015,4016,4017,4018,4019,4020,4021,4022,4023,1355,4024,
4025,4026,4027,4028,4029,4030,4031,4032,4033,4034,4035,4036,4037,4038,4039,4040,
1605,4041,4042,4043,4044,4045,4046,4047,4048,4049,4050,4051,4052,4053,4054,4055,
4056,4057,4058,4059,4060,1606,4061,4062,4063,4064,1607,4065,4066,4067,4068,4069,
4070,4071,4072,4073,4074,4075,4076,1194,4077,4078,1608,4079,4080,4081,4082,4083,
4084,4085,4086,4087,1609,4088,4089,4090,4091,4092,4093,4094,4095,4096,4097,4098,
4099,4100,4101,4102,4103,4104,4105,4106,4107,4108,1259,4109,4110,4111,4112,4113,
4114,4115,4116,4117,4118,4119,4120,4121,4122,4123,4124,1195,4125,4126,4127,1610,
4128,4129,4130,4131,4132,4133,4134,4135,4136,4137,1356,4138,4139,4140,4141,4142,
4143,4144,1611,4145,4146,4147,4148,4149,4150,4151,4152,4153,4154,4155,4156,4157,
4158,4159,4160,4161,4162,4163,4164,4165,4166,4167,4168,4169,4170,4171,4172,4173,
4174,4175,4176,4177,4178,4179,4180,4181,4182,4183,4184,4185,4186,4187,4188,4189,
4190,4191,4192,4193,4194,4195,4196,4197,4198,4199,4200,4201,4202,4203,4204,4205,
4206,4207,4208,4209,4210,4211,4212,4213,4214,4215,4216,4217,4218,4219,1612,4220,
4221,4222,4223,4224,4225,4226,4227,1357,4228,1613,4229,4230,4231,4232,4233,4234,
4235,4236,4237,4238,4239,4240,4241,4242,4243,1614,4244,4245,4246,4247,4248,4249,
4250,4251,4252,4253,4254,4255,4256,4257,4258,4259,4260,4261,4262,4263,4264,4265,
4266,4267,4268,4269,4270,1196,1358,4271,4272,4273,4274,4275,4276,4277,4278,4279,
4280,4281,4282,4283,4284,4285,4286,4287,1615,4288,4289,4290,4291,4292,4293,4294,
4295,4296,4297,4298,4299,4300,4301,4302,4303,4304,4305,4306,4307,4308,4309,4310,
4311,4312,4313,4314,4315,4316,4317,4318,4319,4320,4321,4322,4323,4324,4325,4326,
4327,4328,4329,4330,4331,4332,4333,4334,1616,4335,4336,4337,4338,4339,4340,4341,
4342,4343,4344,4345,4346,4347,4348,4349,4350,4351,4352,4353,4354,4355,4356,4357,
4358,4359,4360,1617,4361,4362,4363,4364,4365,1618,4366,4367,4368,4369,4370,4371,
4372,4373,4374,4375,4376,4377,4378,4379,4380,4381,4382,4383,4384,4385,4386,4387,
4388,4389,4390,4391,4392,4393,4394,4395,4396,4397,4398,4399,4400,4401,4402,4403,
4404,4405,4406,4407,4408,4409,4410,4411,4412,4413,4414,4415,4416,1619,4417,4418,
4419,4420,4421,4422,4423,4424,4425,1112,4426,4427,4428,4429,4430,1620,4431,4432,
4433,4434,4435,4436,4437,4438,4439,4440,4441,4442,1260,1261,4443,4444,4445,4446,
4447,4448,4449,4450,4451,4452,4453,4454,4455,1359,4456,4457,4458,4459,4460,4461,
4462,4463,4464,4465,1621,4466,4467,4468,4469,4470,4471,4472,4473,4474,4475,4476,
4477,4478,4479,4480,4481,4482,4483,4484,4485,4486,4487,4488,4489,1055,4490,4491,
4492,4493,4494,4495,4496,4497,4498,4499,4500,4501,4502,4503,4504,4505,4506,4507,
4508,4509,4510,4511,4512,4513,4514,4515,4516,4517,4518,1622,4519,4520,4521,1623,
4522,4523,4524,4525,4526,4527,4528,4529,4530,4531,4532,4533,4534,4535,1360,4536,
4537,4538,4539,4540,4541,4542,4543, 975,4544,4545,4546,4547,4548,4549,4550,4551,
4552,4553,4554,4555,4556,4557,4558,4559,4560,4561,4562,4563,4564,4565,4566,4567,
4568,4569,4570,4571,1624,4572,4573,4574,4575,4576,1625,4577,4578,4579,4580,4581,
4582,4583,4584,1626,4585,4586,4587,4588,4589,4590,4591,4592,4593,4594,4595,1627,
4596,4597,4598,4599,4600,4601,4602,4603,4604,4605,4606,4607,4608,4609,4610,4611,
4612,4613,4614,4615,1628,4616,4617,4618,4619,4620,4621,4622,4623,4624,4625,4626,
4627,4628,4629,4630,4631,4632,4633,4634,4635,4636,4637,4638,4639,4640,4641,4642,
4643,4644,4645,4646,4647,4648,4649,1361,4650,4651,4652,4653,4654,4655,4656,4657,
4658,4659,4660,4661,1362,4662,4663,4664,4665,4666,4667,4668,4669,4670,4671,4672,
4673,4674,4675,4676,4677,4678,4679,4680,4681,4682,1629,4683,4684,4685,4686,4687,
1630,4688,4689,4690,4691,1153,4692,4693,4694,1113,4695,4696,4697,4698,4699,4700,
4701,4702,4703,4704,4705,4706,4707,4708,4709,4710,4711,1197,4712,4713,4714,4715,
4716,4717,4718,4719,4720,4721,4722,4723,4724,4725,4726,4727,4728,4729,4730,4731,
4732,4733,4734,4735,1631,4736,1632,4737,4738,4739,4740,4741,4742,4743,4744,1633,
4745,4746,4747,4748,4749,1262,4750,4751,4752,4753,4754,1363,4755,4756,4757,4758,
4759,4760,4761,4762,4763,4764,4765,4766,4767,4768,1634,4769,4770,4771,4772,4773,
4774,4775,4776,4777,4778,1635,4779,4780,4781,4782,4783,4784,4785,4786,4787,4788,
4789,1636,4790,4791,4792,4793,4794,4795,4796,4797,4798,4799,4800,4801,4802,4803,
4804,4805,4806,1637,4807,4808,4809,1638,4810,4811,4812,4813,4814,4815,4816,4817,
4818,1639,4819,4820,4821,4822,4823,4824,4825,4826,4827,4828,4829,4830,4831,4832,
4833,1077,4834,4835,4836,4837,4838,4839,4840,4841,4842,4843,4844,4845,4846,4847,
4848,4849,4850,4851,4852,4853,4854,4855,4856,4857,4858,4859,4860,4861,4862,4863,
4864,4865,4866,4867,4868,4869,4870,4871,4872,4873,4874,4875,4876,4877,4878,4879,
4880,4881,4882,4883,1640,4884,4885,1641,4886,4887,4888,4889,4890,4891,4892,4893,
4894,4895,4896,4897,4898,4899,4900,4901,4902,4903,4904,4905,4906,4907,4908,4909,
4910,4911,1642,4912,4913,4914,1364,4915,4916,4917,4918,4919,4920,4921,4922,4923,
4924,4925,4926,4927,4928,4929,4930,4931,1643,4932,4933,4934,4935,4936,4937,4938,
4939,4940,4941,4942,4943,4944,4945,4946,4947,4948,4949,4950,4951,4952,4953,4954,
4955,4956,4957,4958,4959,4960,4961,4962,4963,4964,4965,4966,4967,4968,4969,4970,
4971,4972,4973,4974,4975,4976,4977,4978,4979,4980,1644,4981,4982,4983,4984,1645,
4985,4986,1646,4987,4988,4989,4990,4991,4992,4993,4994,4995,4996,4997,4998,4999,
5000,5001,5002,5003,5004,5005,1647,5006,1648,5007,5008,5009,5010,5011,5012,1078,
5013,5014,5015,5016,5017,5018,5019,5020,5021,5022,5023,5024,5025,5026,5027,5028,
1365,5029,5030,5031,5032,5033,5034,5035,5036,5037,5038,5039,1649,5040,5041,5042,
5043,5044,5045,1366,5046,5047,5048,5049,5050,5051,5052,5053,5054,5055,1650,5056,
5057,5058,5059,5060,5061,5062,5063,5064,5065,5066,5067,5068,5069,5070,5071,5072,
5073,5074,5075,5076,5077,1651,5078,5079,5080,5081,5082,5083,5084,5085,5086,5087,
5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102,5103,
5104,5105,5106,5107,5108,5109,5110,1652,5111,5112,5113,5114,5115,5116,5117,5118,
1367,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,1653,5130,5131,5132,
5133,5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,
5149,1368,5150,1654,5151,1369,5152,5153,5154,5155,5156,5157,5158,5159,5160,5161,
5162,5163,5164,5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,5176,5177,
5178,1370,5179,5180,5181,5182,5183,5184,5185,5186,5187,5188,5189,5190,5191,5192,
5193,5194,5195,5196,5197,5198,1655,5199,5200,5201,5202,1656,5203,5204,5205,5206,
1371,5207,1372,5208,5209,5210,5211,1373,5212,5213,1374,5214,5215,5216,5217,5218,
5219,5220,5221,5222,5223,5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,
5235,5236,5237,5238,5239,5240,5241,5242,5243,5244,5245,5246,5247,1657,5248,5249,
5250,5251,1658,1263,5252,5253,5254,5255,5256,1375,5257,5258,5259,5260,5261,5262,
5263,5264,5265,5266,5267,5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,
5279,5280,5281,5282,5283,1659,5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,
5294,5295,5296,5297,5298,5299,5300,1660,5301,5302,5303,5304,5305,5306,5307,5308,
5309,5310,5311,5312,5313,5314,5315,5316,5317,5318,5319,5320,5321,1376,5322,5323,
5324,5325,5326,5327,5328,5329,5330,5331,5332,5333,1198,5334,5335,5336,5337,5338,
5339,5340,5341,5342,5343,1661,5344,5345,5346,5347,5348,5349,5350,5351,5352,5353,
5354,5355,5356,5357,5358,5359,5360,5361,5362,5363,5364,5365,5366,5367,5368,5369,
5370,5371,5372,5373,5374,5375,5376,5377,5378,5379,5380,5381,5382,5383,5384,5385,
5386,5387,5388,5389,5390,5391,5392,5393,5394,5395,5396,5397,5398,1264,5399,5400,
5401,5402,5403,5404,5405,5406,5407,5408,5409,5410,5411,5412,1662,5413,5414,5415,
5416,1663,5417,5418,5419,5420,5421,5422,5423,5424,5425,5426,5427,5428,5429,5430,
5431,5432,5433,5434,5435,5436,5437,5438,1664,5439,5440,5441,5442,5443,5444,5445,
5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456,5457,5458,5459,5460,5461,
5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472,5473,5474,5475,5476,5477,
5478,1154,5479,5480,5481,5482,5483,5484,5485,1665,5486,5487,5488,5489,5490,5491,
5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504,5505,5506,5507,
5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520,5521,5522,5523,
5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536,5537,5538,5539,
5540,5541,5542,5543,5544,5545,5546,5547,5548,1377,5549,5550,5551,5552,5553,5554,
5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568,5569,5570,
1114,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584,5585,
5586,5587,5588,5589,5590,5591,5592,1378,5593,5594,5595,5596,5597,5598,5599,5600,
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,1379,5615,
5616,5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,
5632,5633,5634,1380,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,
5647,5648,5649,1381,1056,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,
1666,5661,5662,5663,5664,5665,5666,5667,5668,1667,5669,1668,5670,5671,5672,5673,
5674,5675,5676,5677,5678,1155,5679,5680,5681,5682,5683,5684,5685,5686,5687,5688,
5689,5690,5691,5692,5693,5694,5695,5696,5697,5698,1669,5699,5700,5701,5702,5703,
5704,5705,1670,5706,5707,5708,5709,5710,1671,5711,5712,5713,5714,1382,5715,5716,
5717,5718,5719,5720,5721,5722,5723,5724,5725,1672,5726,5727,1673,1674,5728,5729,
5730,5731,5732,5733,5734,5735,5736,1675,5737,5738,5739,5740,5741,5742,5743,5744,
1676,5745,5746,5747,5748,5749,5750,5751,1383,5752,5753,5754,5755,5756,5757,5758,
5759,5760,5761,5762,5763,5764,5765,5766,5767,5768,1677,5769,5770,5771,5772,5773,
1678,5774,5775,5776, 998,5777,5778,5779,5780,5781,5782,5783,5784,5785,1384,5786,
5787,5788,5789,5790,5791,5792,5793,5794,5795,5796,5797,5798,5799,5800,1679,5801,
5802,5803,1115,1116,5804,5805,5806,5807,5808,5809,5810,5811,5812,5813,5814,5815,
5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828,5829,5830,5831,
5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844,5845,5846,5847,
5848,5849,5850,5851,5852,5853,5854,5855,1680,5856,5857,5858,5859,5860,5861,5862,
5863,5864,1681,5865,5866,5867,1682,5868,5869,5870,5871,5872,5873,5874,5875,5876,
5877,5878,5879,1683,5880,1684,5881,5882,5883,5884,1685,5885,5886,5887,5888,5889,
5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,
5906,5907,1686,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,1687,
5936,5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,
5952,1688,1689,5953,1199,5954,5955,5956,5957,5958,5959,5960,5961,1690,5962,5963,
5964,5965,5966,5967,5968,5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,
5980,5981,1385,5982,1386,5983,5984,5985,5986,5987,5988,5989,5990,5991,5992,5993,
5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004,6005,6006,6007,6008,6009,
6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020,6021,6022,6023,6024,6025,
6026,6027,1265,6028,6029,1691,6030,6031,6032,6033,6034,6035,6036,6037,6038,6039,
6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052,6053,6054,6055,
6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068,6069,6070,6071,
6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084,1692,6085,6086,
6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100,6101,6102,
6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116,6117,6118,
6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,1693,6132,6133,
6134,6135,6136,1694,6137,6138,6139,6140,6141,1695,6142,6143,6144,6145,6146,6147,
6148,6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,
6164,6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,
6180,6181,6182,6183,6184,6185,1696,6186,6187,6188,6189,6190,6191,6192,6193,6194,
6195,6196,6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,
6211,6212,6213,6214,6215,6216,6217,6218,6219,1697,6220,6221,6222,6223,6224,6225,
6226,6227,6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,
6242,6243,6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,1698,6254,6255,6256,
6257,6258,6259,6260,6261,6262,6263,1200,6264,6265,6266,6267,6268,6269,6270,6271, #1024
6272,6273,6274,6275,6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,6286,6287,
6288,6289,6290,6291,6292,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,1699,
6303,6304,1700,6305,6306,6307,6308,6309,6310,6311,6312,6313,6314,6315,6316,6317,
6318,6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,
6334,6335,6336,6337,6338,6339,1701,6340,6341,6342,6343,6344,1387,6345,6346,6347,
6348,6349,6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,
6364,6365,6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,
6380,6381,6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,
6396,6397,6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,6411,
6412,6413,1702,6414,6415,6416,6417,6418,6419,6420,6421,6422,1703,6423,6424,6425,
6426,6427,6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,1704,6439,6440,
6441,6442,6443,6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,6455,6456,
6457,6458,6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,
6473,6474,6475,6476,6477,6478,6479,6480,6481,6482,6483,6484,6485,6486,6487,6488,
6489,6490,6491,6492,6493,6494,6495,6496,6497,6498,6499,6500,6501,6502,6503,1266,
6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,
6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532,6533,6534,6535,
6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548,6549,6550,6551,
1705,1706,6552,6553,6554,6555,6556,6557,6558,6559,6560,6561,6562,6563,6564,6565,
6566,6567,6568,6569,6570,6571,6572,6573,6574,6575,6576,6577,6578,6579,6580,6581,
6582,6583,6584,6585,6586,6587,6588,6589,6590,6591,6592,6593,6594,6595,6596,6597,
6598,6599,6600,6601,6602,6603,6604,6605,6606,6607,6608,6609,6610,6611,6612,6613,
6614,6615,6616,6617,6618,6619,6620,6621,6622,6623,6624,6625,6626,6627,6628,6629,
6630,6631,6632,6633,6634,6635,6636,6637,1388,6638,6639,6640,6641,6642,6643,6644,
1707,6645,6646,6647,6648,6649,6650,6651,6652,6653,6654,6655,6656,6657,6658,6659,
6660,6661,6662,6663,1708,6664,6665,6666,6667,6668,6669,6670,6671,6672,6673,6674,
1201,6675,6676,6677,6678,6679,6680,6681,6682,6683,6684,6685,6686,6687,6688,6689,
6690,6691,6692,6693,6694,6695,6696,6697,6698,6699,6700,6701,6702,6703,6704,6705,
6706,6707,6708,6709,6710,6711,6712,6713,6714,6715,6716,6717,6718,6719,6720,6721,
6722,6723,6724,6725,1389,6726,6727,6728,6729,6730,6731,6732,6733,6734,6735,6736,
1390,1709,6737,6738,6739,6740,6741,6742,1710,6743,6744,6745,6746,1391,6747,6748,
6749,6750,6751,6752,6753,6754,6755,6756,6757,1392,6758,6759,6760,6761,6762,6763,
6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777,6778,6779,
6780,1202,6781,6782,6783,6784,6785,6786,6787,6788,6789,6790,6791,6792,6793,6794,
6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806,6807,6808,6809,1711,
6810,6811,6812,6813,6814,6815,6816,6817,6818,6819,6820,6821,6822,6823,6824,6825,
6826,6827,6828,6829,6830,6831,6832,6833,6834,6835,6836,1393,6837,6838,6839,6840,
6841,6842,6843,6844,6845,6846,6847,6848,6849,6850,6851,6852,6853,6854,6855,6856,
6857,6858,6859,6860,6861,6862,6863,6864,6865,6866,6867,6868,6869,6870,6871,6872,
6873,6874,6875,6876,6877,6878,6879,6880,6881,6882,6883,6884,6885,6886,6887,6888,
6889,6890,6891,6892,6893,6894,6895,6896,6897,6898,6899,6900,6901,6902,1712,6903,
6904,6905,6906,6907,6908,6909,6910,1713,6911,6912,6913,6914,6915,6916,6917,6918,
6919,6920,6921,6922,6923,6924,6925,6926,6927,6928,6929,6930,6931,6932,6933,6934,
6935,6936,6937,6938,6939,6940,6941,6942,6943,6944,6945,6946,6947,6948,6949,6950,
6951,6952,6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,
6967,6968,6969,6970,6971,6972,6973,6974,1714,6975,6976,6977,6978,6979,6980,6981,
6982,6983,6984,6985,6986,6987,6988,1394,6989,6990,6991,6992,6993,6994,6995,6996,
6997,6998,6999,7000,1715,7001,7002,7003,7004,7005,7006,7007,7008,7009,7010,7011,
7012,7013,7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,
7028,1716,7029,7030,7031,7032,7033,7034,7035,7036,7037,7038,7039,7040,7041,7042,
7043,7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,
7059,7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,7071,7072,7073,7074,
7075,7076,7077,7078,7079,7080,7081,7082,7083,7084,7085,7086,7087,7088,7089,7090,
7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105,7106,
7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,7119,7120,7121,7122,
7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136,7137,7138,
7139,7140,7141,7142,7143,7144,7145,7146,7147,7148,7149,7150,7151,7152,7153,7154,
7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167,7168,7169,7170,
7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183,7184,7185,7186,
7187,7188,7189,7190,7191,7192,7193,7194,7195,7196,7197,7198,7199,7200,7201,7202,
7203,7204,7205,7206,7207,1395,7208,7209,7210,7211,7212,7213,1717,7214,7215,7216,
7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229,7230,7231,7232,
7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245,7246,7247,7248,
7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261,7262,7263,7264,
7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277,7278,7279,7280,
7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293,7294,7295,7296,
7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308,7309,7310,7311,7312,
7313,1718,7314,7315,7316,7317,7318,7319,7320,7321,7322,7323,7324,7325,7326,7327,
7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339,7340,7341,7342,7343,
7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,7354,7355,7356,7357,7358,7359,
7360,7361,7362,7363,7364,7365,7366,7367,7368,7369,7370,7371,7372,7373,7374,7375,
7376,7377,7378,7379,7380,7381,7382,7383,7384,7385,7386,7387,7388,7389,7390,7391,
7392,7393,7394,7395,7396,7397,7398,7399,7400,7401,7402,7403,7404,7405,7406,7407,
7408,7409,7410,7411,7412,7413,7414,7415,7416,7417,7418,7419,7420,7421,7422,7423,
7424,7425,7426,7427,7428,7429,7430,7431,7432,7433,7434,7435,7436,7437,7438,7439,
7440,7441,7442,7443,7444,7445,7446,7447,7448,7449,7450,7451,7452,7453,7454,7455,
7456,7457,7458,7459,7460,7461,7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,
7472,7473,7474,7475,7476,7477,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,
7488,7489,7490,7491,7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,7503,
7504,7505,7506,7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,
7520,7521,7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,
7536,7537,7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,7550,7551,
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567,
7568,7569,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582,7583,
7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598,7599,
7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614,7615,
7616,7617,7618,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628,7629,7630,7631,
7632,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643,7644,7645,7646,7647,
7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659,7660,7661,7662,7663,
7664,7665,7666,7667,7668,7669,7670,7671,7672,7673,7674,7675,7676,7677,7678,7679,
7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690,7691,7692,7693,7694,7695,
7696,7697,7698,7699,7700,7701,7702,7703,7704,7705,7706,7707,7708,7709,7710,7711,
7712,7713,7714,7715,7716,7717,7718,7719,7720,7721,7722,7723,7724,7725,7726,7727,
7728,7729,7730,7731,7732,7733,7734,7735,7736,7737,7738,7739,7740,7741,7742,7743,
7744,7745,7746,7747,7748,7749,7750,7751,7752,7753,7754,7755,7756,7757,7758,7759,
7760,7761,7762,7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,
7776,7777,7778,7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,
7792,7793,7794,7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,7806,7807,
7808,7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,
7824,7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,
7840,7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,
7856,7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,
7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,
7888,7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,
7904,7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,
7920,7921,7922,7923,7924,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935,
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951,
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967,
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983,
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999,
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031,
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047,
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111,
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127,
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,
8272,8273,8274,8275,8276,8277,8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,
8288,8289,8290,8291,8292,8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,
8304,8305,8306,8307,8308,8309,8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,
8320,8321,8322,8323,8324,8325,8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,
8336,8337,8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,
8352,8353,8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,
8368,8369,8370,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,
8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,
8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,
8416,8417,8418,8419,8420,8421,8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,
8432,8433,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,
8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,
8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,
8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,
8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,
8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,
8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,
8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,
8560,8561,8562,8563,8564,8565,8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,
8576,8577,8578,8579,8580,8581,8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,
8592,8593,8594,8595,8596,8597,8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,
8608,8609,8610,8611,8612,8613,8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,
8624,8625,8626,8627,8628,8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,
8640,8641,8642,8643,8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,
8656,8657,8658,8659,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,
8672,8673,8674,8675,8676,8677,8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,
8704,8705,8706,8707,8708,8709,8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,
8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,
8736,8737,8738,8739,8740,8741)
# flake8: noqa
| mit |
rishig/zulip | zerver/tests/test_unread.py | 2 | 23491 | # -*- coding: utf-8 -*-
from typing import Any, List, Mapping
from django.db import connection
from zerver.models import (
get_realm,
get_stream,
get_stream_recipient,
get_user,
Subscription,
UserMessage,
UserProfile,
)
from zerver.lib.fix_unreads import (
fix,
fix_pre_pointer,
fix_unsubscribed,
)
from zerver.lib.test_helpers import (
get_subscription,
tornado_redirected_to_list,
)
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.lib.topic_mutes import add_topic_mute
import mock
import ujson
class PointerTest(ZulipTestCase):
    """Tests for the /users/me/pointer endpoint and for how the pointer
    interacts with `use_first_unread_anchor` in the message-fetch API."""

    def test_update_pointer(self) -> None:
        """
        Posting a pointer to /update (in the form {"pointer": pointer}) changes
        the pointer we store for your UserProfile.
        """
        self.login(self.example_email("hamlet"))
        self.assertEqual(self.example_user('hamlet').pointer, -1)
        msg_id = self.send_stream_message(self.example_email("othello"), "Verona")
        result = self.client_post("/json/users/me/pointer", {"pointer": msg_id})
        self.assert_json_success(result)
        self.assertEqual(self.example_user('hamlet').pointer, msg_id)

    def test_api_update_pointer(self) -> None:
        """
        Same as above, but for the API view
        """
        user = self.example_user('hamlet')
        email = user.email
        self.assertEqual(user.pointer, -1)
        msg_id = self.send_stream_message(self.example_email("othello"), "Verona")
        result = self.api_post(email, "/api/v1/users/me/pointer", {"pointer": msg_id})
        self.assert_json_success(result)
        self.assertEqual(get_user(email, user.realm).pointer, msg_id)

    def test_missing_pointer(self) -> None:
        """
        Posting json to /json/users/me/pointer which does not contain a pointer key/value pair
        returns a 400 and error message.
        """
        self.login(self.example_email("hamlet"))
        self.assertEqual(self.example_user('hamlet').pointer, -1)
        result = self.client_post("/json/users/me/pointer", {"foo": 1})
        self.assert_json_error(result, "Missing 'pointer' argument")
        self.assertEqual(self.example_user('hamlet').pointer, -1)

    def test_invalid_pointer(self) -> None:
        """
        Posting json to /json/users/me/pointer with an invalid pointer returns a 400 and error
        message.
        """
        self.login(self.example_email("hamlet"))
        self.assertEqual(self.example_user('hamlet').pointer, -1)
        result = self.client_post("/json/users/me/pointer", {"pointer": "foo"})
        self.assert_json_error(result, "Bad value for 'pointer': foo")
        self.assertEqual(self.example_user('hamlet').pointer, -1)

    def test_pointer_out_of_range(self) -> None:
        """
        Posting json to /json/users/me/pointer with an out of range (< 0) pointer returns a 400
        and error message.
        """
        self.login(self.example_email("hamlet"))
        self.assertEqual(self.example_user('hamlet').pointer, -1)
        result = self.client_post("/json/users/me/pointer", {"pointer": -2})
        self.assert_json_error(result, "Bad value for 'pointer': -2")
        self.assertEqual(self.example_user('hamlet').pointer, -1)

    def test_use_first_unread_anchor_interaction_with_pointer(self) -> None:
        """
        Getting old messages (a get request to /json/messages) should never
        return an unread message older than the current pointer, when there's
        no narrow set.
        """
        self.login(self.example_email("hamlet"))
        # Ensure the pointer is not set (-1)
        self.assertEqual(self.example_user('hamlet').pointer, -1)

        # Mark all existing messages as read
        result = self.client_post("/json/mark_all_as_read")
        self.assert_json_success(result)

        # Send a new message (this will be unread)
        new_message_id = self.send_stream_message(self.example_email("othello"), "Verona",
                                                  "test")

        # If we call get_messages with use_first_unread_anchor=True, we
        # should get the message we just sent
        messages_response = self.get_messages_response(
            anchor=0, num_before=0, num_after=1, use_first_unread_anchor=True)
        self.assertEqual(messages_response['messages'][0]['id'], new_message_id)
        self.assertEqual(messages_response['anchor'], new_message_id)

        # We want to get the message_id of an arbitrary old message. We can
        # call get_messages with use_first_unread_anchor=False and simply
        # save the first message we're returned.
        messages = self.get_messages(
            anchor=0, num_before=0, num_after=2, use_first_unread_anchor=False)
        old_message_id = messages[0]['id']
        next_old_message_id = messages[1]['id']

        # Verify the message is marked as read
        user_message = UserMessage.objects.get(
            message_id=old_message_id,
            user_profile=self.example_user('hamlet'))
        self.assertTrue(user_message.flags.read)

        # Let's set this old message to be unread
        result = self.client_post("/json/messages/flags",
                                  {"messages": ujson.dumps([old_message_id]),
                                   "op": "remove",
                                   "flag": "read"})

        # Verify it's now marked as unread
        user_message = UserMessage.objects.get(
            message_id=old_message_id,
            user_profile=self.example_user('hamlet'))
        self.assert_json_success(result)
        self.assertFalse(user_message.flags.read)

        # Now if we call get_messages with use_first_unread_anchor=True,
        # we should get the old message we just set to unread
        messages_response = self.get_messages_response(
            anchor=0, num_before=0, num_after=1, use_first_unread_anchor=True)
        self.assertEqual(messages_response['messages'][0]['id'], old_message_id)
        self.assertEqual(messages_response['anchor'], old_message_id)

        # Let's update the pointer to be *after* this old unread message (but
        # still on or before the new unread message we just sent)
        result = self.client_post("/json/users/me/pointer",
                                  {"pointer": next_old_message_id})
        self.assert_json_success(result)
        self.assertEqual(self.example_user('hamlet').pointer,
                         next_old_message_id)

        # Verify that moving the pointer didn't mark our message as read.
        user_message = UserMessage.objects.get(
            message_id=old_message_id,
            user_profile=self.example_user('hamlet'))
        self.assertFalse(user_message.flags.read)

        # Now if we call get_messages with use_first_unread_anchor=True,
        # we should not get the old unread message (because it's before the
        # pointer), and instead should get the newly sent unread message
        messages_response = self.get_messages_response(
            anchor=0, num_before=0, num_after=1, use_first_unread_anchor=True)
        self.assertEqual(messages_response['messages'][0]['id'], new_message_id)
        self.assertEqual(messages_response['anchor'], new_message_id)

    def test_visible_messages_use_first_unread_anchor(self) -> None:
        """
        use_first_unread_anchor must respect the realm's first visible
        message id: anchoring never returns messages hidden by it.
        """
        self.login(self.example_email("hamlet"))
        self.assertEqual(self.example_user('hamlet').pointer, -1)
        result = self.client_post("/json/mark_all_as_read")
        self.assert_json_success(result)

        new_message_id = self.send_stream_message(self.example_email("othello"), "Verona",
                                                  "test")

        messages_response = self.get_messages_response(
            anchor=0, num_before=0, num_after=1, use_first_unread_anchor=True)
        self.assertEqual(messages_response['messages'][0]['id'], new_message_id)
        self.assertEqual(messages_response['anchor'], new_message_id)

        # Visibility cutoff at the new message: it is still returned.
        with mock.patch('zerver.views.messages.get_first_visible_message_id', return_value=new_message_id):
            messages_response = self.get_messages_response(
                anchor=0, num_before=0, num_after=1, use_first_unread_anchor=True)
            self.assertEqual(messages_response['messages'][0]['id'], new_message_id)
            self.assertEqual(messages_response['anchor'], new_message_id)

        # Cutoff *after* the new message: nothing is visible to return.
        # (Renamed from the misspelled local `messages_reponse`.)
        with mock.patch('zerver.views.messages.get_first_visible_message_id', return_value=new_message_id + 1):
            messages_response = self.get_messages_response(
                anchor=0, num_before=0, num_after=1, use_first_unread_anchor=True)
            self.assert_length(messages_response['messages'], 0)
            self.assertIn('anchor', messages_response)

        # Cutoff before the new message: it is returned normally.
        with mock.patch('zerver.views.messages.get_first_visible_message_id', return_value=new_message_id - 1):
            messages = self.get_messages(
                anchor=0, num_before=0, num_after=1, use_first_unread_anchor=True)
            self.assert_length(messages, 1)
class UnreadCountTests(ZulipTestCase):
    """Tests for unread-message bookkeeping: UserMessage read flags, the
    /json/messages/flags endpoint, and the bulk mark-as-read endpoints
    for streams and topics (including the update_message_flags events
    they emit)."""

    def setUp(self) -> None:
        # Send two personal messages to hamlet with push notifications
        # mocked as enabled; these serve as the unread fixtures below.
        with mock.patch('zerver.lib.push_notifications.push_notifications_enabled',
                        return_value = True) as mock_push_notifications_enabled:
            self.unread_msg_ids = [
                self.send_personal_message(
                    self.example_email("iago"), self.example_email("hamlet"), "hello"),
                self.send_personal_message(
                    self.example_email("iago"), self.example_email("hamlet"), "hello2")]
            mock_push_notifications_enabled.assert_called()

    # Sending a new message results in unread UserMessages being created
    def test_new_message(self) -> None:
        self.login(self.example_email("hamlet"))
        content = "Test message for unset read bit"
        last_msg = self.send_stream_message(self.example_email("hamlet"), "Verona", content)
        user_messages = list(UserMessage.objects.filter(message=last_msg))
        self.assertEqual(len(user_messages) > 0, True)
        for um in user_messages:
            self.assertEqual(um.message.content, content)
            # The sender's own copy is marked read; every recipient's is unread.
            if um.user_profile.email != self.example_email("hamlet"):
                self.assertFalse(um.flags.read)

    def test_update_flags(self) -> None:
        """Adding and removing the 'read' flag via /json/messages/flags."""
        self.login(self.example_email("hamlet"))

        result = self.client_post("/json/messages/flags",
                                  {"messages": ujson.dumps(self.unread_msg_ids),
                                   "op": "add",
                                   "flag": "read"})
        self.assert_json_success(result)

        # Ensure we properly set the flags
        found = 0
        for msg in self.get_messages():
            if msg['id'] in self.unread_msg_ids:
                self.assertEqual(msg['flags'], ['read'])
                found += 1
        self.assertEqual(found, 2)

        # Remove the flag from only the second fixture message.
        result = self.client_post("/json/messages/flags",
                                  {"messages": ujson.dumps([self.unread_msg_ids[1]]),
                                   "op": "remove", "flag": "read"})
        self.assert_json_success(result)

        # Ensure we properly remove just one flag
        for msg in self.get_messages():
            if msg['id'] == self.unread_msg_ids[0]:
                self.assertEqual(msg['flags'], ['read'])
            elif msg['id'] == self.unread_msg_ids[1]:
                self.assertEqual(msg['flags'], [])

    def test_mark_all_in_stream_read(self) -> None:
        """Marking a stream read flips only that stream's messages for the
        requesting user and emits one update_message_flags event."""
        self.login(self.example_email("hamlet"))
        user_profile = self.example_user('hamlet')
        stream = self.subscribe(user_profile, "test_stream")
        self.subscribe(self.example_user("cordelia"), "test_stream")

        message_id = self.send_stream_message(self.example_email("hamlet"), "test_stream", "hello")
        unrelated_message_id = self.send_stream_message(self.example_email("hamlet"), "Denmark", "hello")

        events = []  # type: List[Mapping[str, Any]]
        with tornado_redirected_to_list(events):
            result = self.client_post("/json/mark_stream_as_read", {
                "stream_id": stream.id
            })

        self.assert_json_success(result)
        self.assertTrue(len(events) == 1)

        event = events[0]['event']
        expected = dict(operation='add',
                        messages=[message_id],
                        flag='read',
                        type='update_message_flags',
                        all=False)

        differences = [key for key in expected if expected[key] != event[key]]
        self.assertTrue(len(differences) == 0)

        um = list(UserMessage.objects.filter(message=message_id))
        for msg in um:
            if msg.user_profile.email == self.example_email("hamlet"):
                self.assertTrue(msg.flags.read)
            else:
                self.assertFalse(msg.flags.read)

        # Messages in other streams must remain unread.
        unrelated_messages = list(UserMessage.objects.filter(message=unrelated_message_id))
        for msg in unrelated_messages:
            if msg.user_profile.email == self.example_email("hamlet"):
                self.assertFalse(msg.flags.read)

    def test_mark_all_in_invalid_stream_read(self) -> None:
        self.login(self.example_email("hamlet"))
        invalid_stream_id = "12345678"
        result = self.client_post("/json/mark_stream_as_read", {
            "stream_id": invalid_stream_id
        })
        self.assert_json_error(result, 'Invalid stream id')

    def test_mark_all_topics_unread_with_invalid_stream_name(self) -> None:
        self.login(self.example_email("hamlet"))
        invalid_stream_id = "12345678"
        result = self.client_post("/json/mark_topic_as_read", {
            "stream_id": invalid_stream_id,
            'topic_name': 'whatever',
        })
        self.assert_json_error(result, "Invalid stream id")

    def test_mark_all_in_stream_topic_read(self) -> None:
        """Same as the stream case above, but scoped to a single topic."""
        self.login(self.example_email("hamlet"))
        user_profile = self.example_user('hamlet')
        self.subscribe(user_profile, "test_stream")

        message_id = self.send_stream_message(self.example_email("hamlet"), "test_stream", "hello", "test_topic")
        unrelated_message_id = self.send_stream_message(self.example_email("hamlet"), "Denmark", "hello", "Denmark2")

        events = []  # type: List[Mapping[str, Any]]
        with tornado_redirected_to_list(events):
            result = self.client_post("/json/mark_topic_as_read", {
                "stream_id": get_stream("test_stream", user_profile.realm).id,
                "topic_name": "test_topic",
            })

        self.assert_json_success(result)
        self.assertTrue(len(events) == 1)

        event = events[0]['event']
        expected = dict(operation='add',
                        messages=[message_id],
                        flag='read',
                        type='update_message_flags',
                        all=False)

        differences = [key for key in expected if expected[key] != event[key]]
        self.assertTrue(len(differences) == 0)

        um = list(UserMessage.objects.filter(message=message_id))
        for msg in um:
            if msg.user_profile.email == self.example_email("hamlet"):
                self.assertTrue(msg.flags.read)

        unrelated_messages = list(UserMessage.objects.filter(message=unrelated_message_id))
        for msg in unrelated_messages:
            if msg.user_profile.email == self.example_email("hamlet"):
                self.assertFalse(msg.flags.read)

    def test_mark_all_in_invalid_topic_read(self) -> None:
        self.login(self.example_email("hamlet"))
        invalid_topic_name = "abc"
        result = self.client_post("/json/mark_topic_as_read", {
            "stream_id": get_stream("Denmark", get_realm("zulip")).id,
            "topic_name": invalid_topic_name,
        })
        self.assert_json_error(result, 'No such topic \'abc\'')
class FixUnreadTests(ZulipTestCase):
    """Tests for zerver.lib.fix_unreads, which repairs historically-bad
    unread state: pre-pointer unreads on unmuted streams/topics and
    unreads on streams the user has unsubscribed from."""

    def test_fix_unreads(self) -> None:
        user = self.example_user('hamlet')
        realm = get_realm('zulip')

        def send_message(stream_name: str, topic_name: str) -> int:
            # Send as othello and return *hamlet's* UserMessage row id
            # (not the message id) for the new message.
            msg_id = self.send_stream_message(
                self.example_email("othello"),
                stream_name,
                topic_name=topic_name)
            um = UserMessage.objects.get(
                user_profile=user,
                message_id=msg_id)
            return um.id

        def assert_read(user_message_id: int) -> None:
            um = UserMessage.objects.get(id=user_message_id)
            self.assertTrue(um.flags.read)

        def assert_unread(user_message_id: int) -> None:
            um = UserMessage.objects.get(id=user_message_id)
            self.assertFalse(um.flags.read)

        def mute_stream(stream_name: str) -> None:
            stream = get_stream(stream_name, realm)
            recipient = get_stream_recipient(stream.id)
            subscription = Subscription.objects.get(
                user_profile=user,
                recipient=recipient
            )
            subscription.is_muted = True
            subscription.save()

        def mute_topic(stream_name: str, topic_name: str) -> None:
            stream = get_stream(stream_name, realm)
            recipient = get_stream_recipient(stream.id)
            add_topic_mute(
                user_profile=user,
                stream_id=stream.id,
                recipient_id=recipient.id,
                topic_name=topic_name,
            )

        def force_unsubscribe(stream_name: str) -> None:
            '''
            We don't want side effects here, since the eventual
            unsubscribe path may mark messages as read, defeating
            the test setup here.
            '''
            sub = get_subscription(stream_name, user)
            sub.active = False
            sub.save()

        # The data setup here is kind of funny, because some of these
        # conditions should not actually happen in practice going forward,
        # but we may have had bad data from the past.

        mute_stream('Denmark')
        mute_topic('Verona', 'muted_topic')

        um_normal_id = send_message('Verona', 'normal')
        um_muted_topic_id = send_message('Verona', 'muted_topic')
        um_muted_stream_id = send_message('Denmark', 'whatever')

        # Advance the pointer past the three messages above, then send one
        # more, so um_post_pointer_id is the only post-pointer unread.
        user.pointer = self.get_last_message().id
        user.save()
        um_post_pointer_id = send_message('Verona', 'muted_topic')

        self.subscribe(user, 'temporary')
        um_unsubscribed_id = send_message('temporary', 'whatever')
        force_unsubscribe('temporary')

        # verify data setup
        assert_unread(um_normal_id)
        assert_unread(um_muted_topic_id)
        assert_unread(um_muted_stream_id)
        assert_unread(um_post_pointer_id)
        assert_unread(um_unsubscribed_id)

        with connection.cursor() as cursor:
            fix_pre_pointer(cursor, user)

        # The only message that should have been fixed is the "normal"
        # unumuted message before the pointer.
        assert_read(um_normal_id)

        # We don't "fix" any messages that are either muted or after the
        # pointer, because they can be legitimately unread.
        assert_unread(um_muted_topic_id)
        assert_unread(um_muted_stream_id)
        assert_unread(um_post_pointer_id)
        assert_unread(um_unsubscribed_id)

        # fix unsubscribed
        with connection.cursor() as cursor:
            fix_unsubscribed(cursor, user)

        # Most messages don't change.
        assert_unread(um_muted_topic_id)
        assert_unread(um_muted_stream_id)
        assert_unread(um_post_pointer_id)

        # The unsubscribed entry should change.
        assert_read(um_unsubscribed_id)

        # test idempotency
        fix(user)

        assert_read(um_normal_id)
        assert_unread(um_muted_topic_id)
        assert_unread(um_muted_stream_id)
        assert_unread(um_post_pointer_id)
        assert_read(um_unsubscribed_id)
class PushNotificationMarkReadFlowsTest(ZulipTestCase):
    """Verify that marking messages read (by topic, by stream, or globally)
    clears the active_mobile_push_notification flag on the affected
    UserMessage rows, and only on those."""

    def get_mobile_push_notification_ids(self, user_profile: UserProfile) -> List[int]:
        """Return, ordered by message id, the ids of messages that still
        have an active mobile push notification for this user."""
        return list(UserMessage.objects.filter(
            user_profile=user_profile,
            flags=UserMessage.flags.active_mobile_push_notification).order_by(
                "message_id").values_list("message_id", flat=True))

    @mock.patch('zerver.lib.push_notifications.push_notifications_enabled', return_value=True)
    def test_track_active_mobile_push_notifications(self, mock_push_notifications: mock.MagicMock) -> None:
        mock_push_notifications.return_value = True
        self.login(self.example_email("hamlet"))
        user_profile = self.example_user('hamlet')
        stream = self.subscribe(user_profile, "test_stream")
        second_stream = self.subscribe(user_profile, "second_stream")

        # Enable stream push notifications on both streams; previously the
        # first response was silently dropped, so check both succeed.
        property_name = "push_notifications"
        result = self.api_post(user_profile.email, "/api/v1/users/me/subscriptions/properties",
                               {"subscription_data": ujson.dumps([{"property": property_name,
                                                                   "value": True,
                                                                   "stream_id": stream.id}])})
        self.assert_json_success(result)
        result = self.api_post(user_profile.email, "/api/v1/users/me/subscriptions/properties",
                               {"subscription_data": ujson.dumps([{"property": property_name,
                                                                   "value": True,
                                                                   "stream_id": second_stream.id}])})
        self.assert_json_success(result)
        self.assertEqual(self.get_mobile_push_notification_ids(user_profile), [])

        message_id = self.send_stream_message(self.example_email("cordelia"), "test_stream", "hello", "test_topic")
        second_message_id = self.send_stream_message(self.example_email("cordelia"), "test_stream", "hello", "other_topic")
        third_message_id = self.send_stream_message(self.example_email("cordelia"), "second_stream", "hello", "test_topic")

        self.assertEqual(self.get_mobile_push_notification_ids(user_profile),
                         [message_id, second_message_id, third_message_id])

        # Marking one topic read clears only that topic's notification.
        result = self.client_post("/json/mark_topic_as_read", {
            "stream_id": str(stream.id),
            "topic_name": "test_topic",
        })
        self.assert_json_success(result)
        self.assertEqual(self.get_mobile_push_notification_ids(user_profile),
                         [second_message_id, third_message_id])

        # Marking the whole stream read clears the rest of its topics.
        # (The previous version passed a stray copy-pasted "topic_name"
        # parameter here and never checked the response.)
        result = self.client_post("/json/mark_stream_as_read", {
            "stream_id": str(stream.id),
        })
        self.assert_json_success(result)
        self.assertEqual(self.get_mobile_push_notification_ids(user_profile),
                         [third_message_id])

        fourth_message_id = self.send_stream_message(self.example_email("cordelia"), "test_stream", "hello", "test_topic")
        self.assertEqual(self.get_mobile_push_notification_ids(user_profile),
                         [third_message_id, fourth_message_id])

        result = self.client_post("/json/mark_all_as_read", {})
        self.assert_json_success(result)
        self.assertEqual(self.get_mobile_push_notification_ids(user_profile),
                         [])
        mock_push_notifications.assert_called()
| apache-2.0 |
wujuguang/scrapy | tests/test_downloadermiddleware_robotstxt.py | 1 | 8351 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from twisted.internet import reactor, error
from twisted.internet.defer import Deferred, DeferredList, maybeDeferred
from twisted.python import failure
from twisted.trial import unittest
from scrapy.downloadermiddlewares.robotstxt import (RobotsTxtMiddleware,
logger as mw_module_logger)
from scrapy.exceptions import IgnoreRequest, NotConfigured
from scrapy.http import Request, Response, TextResponse
from scrapy.settings import Settings
from tests import mock
from tests.test_robotstxt_interface import rerp_available, reppy_available
class RobotsTxtMiddlewareTest(unittest.TestCase):
    """Twisted trial tests for scrapy's RobotsTxtMiddleware.

    The crawler and engine are mocked; each _get_*_crawler helper installs
    an ``engine.download`` side effect that resolves the robots.txt fetch
    with a canned Response (or Failure), fired from the reactor thread the
    way a real download would be.
    """

    def setUp(self):
        self.crawler = mock.MagicMock()
        self.crawler.settings = Settings()
        self.crawler.engine.download = mock.MagicMock()

    def tearDown(self):
        del self.crawler

    def test_robotstxt_settings(self):
        # ROBOTSTXT_OBEY defaults to False, so constructing the middleware
        # without enabling it must raise NotConfigured.
        self.crawler.settings = Settings()
        self.crawler.settings.set('USER_AGENT', 'CustomAgent')
        self.assertRaises(NotConfigured, RobotsTxtMiddleware, self.crawler)

    def _get_successful_crawler(self):
        # Crawler whose engine serves a well-formed robots.txt for
        # http://site.local, including percent-encoded and raw non-ASCII
        # Disallow paths (taken from Wikipedia's robots.txt).
        crawler = self.crawler
        crawler.settings.set('ROBOTSTXT_OBEY', True)
        ROBOTS = u"""
User-Agent: *
Disallow: /admin/
Disallow: /static/
# taken from https://en.wikipedia.org/robots.txt
Disallow: /wiki/K%C3%A4ytt%C3%A4j%C3%A4:
Disallow: /wiki/Käyttäjä:
User-Agent: UnicödeBöt
Disallow: /some/randome/page.html
""".encode('utf-8')
        response = TextResponse('http://site.local/robots.txt', body=ROBOTS)

        def return_response(request, spider):
            deferred = Deferred()
            # Fire the callback from the reactor loop, like a real download.
            reactor.callFromThread(deferred.callback, response)
            return deferred
        crawler.engine.download.side_effect = return_response
        return crawler

    def test_robotstxt(self):
        middleware = RobotsTxtMiddleware(self._get_successful_crawler())
        return DeferredList([
            self.assertNotIgnored(Request('http://site.local/allowed'), middleware),
            self.assertIgnored(Request('http://site.local/admin/main'), middleware),
            self.assertIgnored(Request('http://site.local/static/'), middleware),
            self.assertIgnored(Request('http://site.local/wiki/K%C3%A4ytt%C3%A4j%C3%A4:'), middleware),
            self.assertIgnored(Request(u'http://site.local/wiki/Käyttäjä:'), middleware)
        ], fireOnOneErrback=True)

    def test_robotstxt_ready_parser(self):
        # Second request for the same netloc must use the cached parser.
        middleware = RobotsTxtMiddleware(self._get_successful_crawler())
        d = self.assertNotIgnored(Request('http://site.local/allowed'), middleware)
        d.addCallback(lambda _: self.assertNotIgnored(Request('http://site.local/allowed'), middleware))
        return d

    def test_robotstxt_meta(self):
        # The dont_obey_robotstxt request meta key bypasses the middleware.
        middleware = RobotsTxtMiddleware(self._get_successful_crawler())
        meta = {'dont_obey_robotstxt': True}
        return DeferredList([
            self.assertNotIgnored(Request('http://site.local/allowed', meta=meta), middleware),
            self.assertNotIgnored(Request('http://site.local/admin/main', meta=meta), middleware),
            self.assertNotIgnored(Request('http://site.local/static/', meta=meta), middleware)
        ], fireOnOneErrback=True)

    def _get_garbage_crawler(self):
        # Crawler whose robots.txt response is binary garbage (GIF header).
        crawler = self.crawler
        crawler.settings.set('ROBOTSTXT_OBEY', True)
        response = Response('http://site.local/robots.txt', body=b'GIF89a\xd3\x00\xfe\x00\xa2')

        def return_response(request, spider):
            deferred = Deferred()
            reactor.callFromThread(deferred.callback, response)
            return deferred
        crawler.engine.download.side_effect = return_response
        return crawler

    def test_robotstxt_garbage(self):
        # garbage response should be discarded, equal 'allow all'
        middleware = RobotsTxtMiddleware(self._get_garbage_crawler())
        deferred = DeferredList([
            self.assertNotIgnored(Request('http://site.local'), middleware),
            self.assertNotIgnored(Request('http://site.local/allowed'), middleware),
            self.assertNotIgnored(Request('http://site.local/admin/main'), middleware),
            self.assertNotIgnored(Request('http://site.local/static/'), middleware)
        ], fireOnOneErrback=True)
        return deferred

    def _get_emptybody_crawler(self):
        # Crawler whose robots.txt response has an empty body.
        crawler = self.crawler
        crawler.settings.set('ROBOTSTXT_OBEY', True)
        response = Response('http://site.local/robots.txt')

        def return_response(request, spider):
            deferred = Deferred()
            reactor.callFromThread(deferred.callback, response)
            return deferred
        crawler.engine.download.side_effect = return_response
        return crawler

    def test_robotstxt_empty_response(self):
        # empty response should equal 'allow all'
        middleware = RobotsTxtMiddleware(self._get_emptybody_crawler())
        return DeferredList([
            self.assertNotIgnored(Request('http://site.local/allowed'), middleware),
            self.assertNotIgnored(Request('http://site.local/admin/main'), middleware),
            self.assertNotIgnored(Request('http://site.local/static/'), middleware)
        ], fireOnOneErrback=True)

    def test_robotstxt_error(self):
        # A download failure (fired asynchronously) must be logged via
        # the middleware's _logerror and not block the request.
        self.crawler.settings.set('ROBOTSTXT_OBEY', True)
        err = error.DNSLookupError('Robotstxt address not found')

        def return_failure(request, spider):
            deferred = Deferred()
            reactor.callFromThread(deferred.errback, failure.Failure(err))
            return deferred
        self.crawler.engine.download.side_effect = return_failure

        middleware = RobotsTxtMiddleware(self.crawler)
        # Wrap _logerror so the original still runs but calls are recorded.
        middleware._logerror = mock.MagicMock(side_effect=middleware._logerror)
        deferred = middleware.process_request(Request('http://site.local'), None)
        deferred.addCallback(lambda _: self.assertTrue(middleware._logerror.called))
        return deferred

    def test_robotstxt_immediate_error(self):
        # Same as above, but the deferred fails synchronously (before the
        # middleware has attached its errbacks).
        self.crawler.settings.set('ROBOTSTXT_OBEY', True)
        err = error.DNSLookupError('Robotstxt address not found')

        def immediate_failure(request, spider):
            deferred = Deferred()
            deferred.errback(failure.Failure(err))
            return deferred
        self.crawler.engine.download.side_effect = immediate_failure

        middleware = RobotsTxtMiddleware(self.crawler)
        return self.assertNotIgnored(Request('http://site.local'), middleware)

    def test_ignore_robotstxt_request(self):
        # An IgnoreRequest failure on the robots.txt fetch itself is an
        # expected condition and must not be logged as an error.
        self.crawler.settings.set('ROBOTSTXT_OBEY', True)

        def ignore_request(request, spider):
            deferred = Deferred()
            reactor.callFromThread(deferred.errback, failure.Failure(IgnoreRequest()))
            return deferred
        self.crawler.engine.download.side_effect = ignore_request

        middleware = RobotsTxtMiddleware(self.crawler)
        mw_module_logger.error = mock.MagicMock()

        d = self.assertNotIgnored(Request('http://site.local/allowed'), middleware)
        d.addCallback(lambda _: self.assertFalse(mw_module_logger.error.called))
        return d

    def assertNotIgnored(self, request, middleware):
        # process_request returning None means the request proceeds.
        spider = None  # not actually used
        dfd = maybeDeferred(middleware.process_request, request, spider)
        dfd.addCallback(self.assertIsNone)
        return dfd

    def assertIgnored(self, request, middleware):
        # A disallowed request must fail with IgnoreRequest.
        spider = None  # not actually used
        return self.assertFailure(maybeDeferred(middleware.process_request, request, spider),
                                  IgnoreRequest)
class RobotsTxtMiddlewareWithRerpTest(RobotsTxtMiddlewareTest):
    """Run the whole RobotsTxtMiddleware suite against the Rerp parser."""

    if not rerp_available():
        skip = "Rerp parser is not installed"

    def setUp(self):
        super(RobotsTxtMiddlewareWithRerpTest, self).setUp()
        settings = self.crawler.settings
        settings.set('ROBOTSTXT_PARSER', 'scrapy.robotstxt.RerpRobotParser')
class RobotsTxtMiddlewareWithReppyTest(RobotsTxtMiddlewareTest):
    """Run the whole RobotsTxtMiddleware suite against the Reppy parser."""

    if not reppy_available():
        skip = "Reppy parser is not installed"

    def setUp(self):
        super(RobotsTxtMiddlewareWithReppyTest, self).setUp()
        settings = self.crawler.settings
        settings.set('ROBOTSTXT_PARSER', 'scrapy.robotstxt.ReppyRobotParser')
| bsd-3-clause |
trabacus-softapps/openerp-8.0-cc | openerp/addons/project_long_term/__openerp__.py | 57 | 2646 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Long Term Projects',
'version': '1.1',
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'category': 'Project Management',
'images': ['images/project_phase_form.jpeg','images/project_phases.jpeg', 'images/resources_allocation.jpeg'],
'depends': ['project'],
'description': """
Long Term Project management module that tracks planning, scheduling, resources allocation.
===========================================================================================
Features:
---------
* Manage Big project
* Define various Phases of Project
* Compute Phase Scheduling: Compute start date and end date of the phases
which are in draft, open and pending state of the project given. If no
project given then all the draft, open and pending state phases will be taken.
* Compute Task Scheduling: This works same as the scheduler button on
project.phase. It takes the project as argument and computes all the open,
draft and pending tasks.
* Schedule Tasks: All the tasks which are in draft, pending and open state
are scheduled with taking the phase's start date.
""",
'demo': ['project_long_term_demo.xml'],
'test': [
'test/phase_process.yml',
'test/task_process.yml',
],
'data': [
'security/ir.model.access.csv',
'project_long_term_view.xml',
'project_long_term_workflow.xml',
'wizard/project_compute_phases_view.xml',
'wizard/project_compute_tasks_view.xml',
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ThePletch/ansible | lib/ansible/modules/cloud/cloudstack/cs_template.py | 48 | 21811 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module metadata consumed by ansible-doc / the Ansible build tooling.
ANSIBLE_METADATA = dict(
    status=['stableinterface'],
    supported_by='community',
    version='1.0',
)
DOCUMENTATION = '''
---
module: cs_template
short_description: Manages templates on Apache CloudStack based clouds.
description:
- Register a template from URL, create a template from a ROOT volume of a stopped VM or its snapshot, extract and delete templates.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the template.
required: true
url:
description:
- URL of where the template is hosted on C(state=present).
- URL to which the template would be extracted on C(state=extracted).
- Mutually exclusive with C(vm).
required: false
default: null
vm:
description:
- VM name the template will be created from its volume or alternatively from a snapshot.
- VM must be in stopped state if created from its volume.
- Mutually exclusive with C(url).
required: false
default: null
snapshot:
description:
- Name of the snapshot, created from the VM ROOT volume, the template will be created from.
- C(vm) is required together with this argument.
required: false
default: null
os_type:
description:
- OS type that best represents the OS of this template.
required: false
default: null
checksum:
description:
- The MD5 checksum value of this template.
- If set, we search by checksum instead of name.
required: false
default: false
is_ready:
description:
- This flag is used for searching existing templates.
- If set to C(true), it will only list template ready for deployment e.g. successfully downloaded and installed.
- Recommended to set it to C(false).
required: false
default: false
is_public:
description:
- Register the template to be publicly available to all users.
- Only used if C(state) is present.
required: false
default: false
is_featured:
description:
- Register the template to be featured.
- Only used if C(state) is present.
required: false
default: false
is_dynamically_scalable:
description:
- Register the template having XS/VMWare tools installed in order to support dynamic scaling of VM CPU/memory.
- Only used if C(state) is present.
required: false
default: false
cross_zones:
description:
- Whether the template should be synced or removed across zones.
- Only used if C(state) is present or absent.
required: false
default: false
mode:
description:
- Mode for the template extraction.
- Only used if C(state=extracted).
required: false
default: 'http_download'
choices: [ 'http_download', 'ftp_upload' ]
domain:
description:
- Domain the template, snapshot or VM is related to.
required: false
default: null
account:
description:
- Account the template, snapshot or VM is related to.
required: false
default: null
project:
description:
- Name of the project the template to be registered in.
required: false
default: null
zone:
description:
- Name of the zone you wish the template to be registered or deleted from.
- If not specified, first found zone will be used.
required: false
default: null
template_filter:
description:
- Name of the filter used to search for the template.
required: false
default: 'self'
choices: [ 'featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community' ]
hypervisor:
description:
- Name the hypervisor to be used for creating the new template.
- Relevant when using C(state=present).
required: false
default: null
choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM' ]
requires_hvm:
description:
- true if this template requires HVM.
required: false
default: false
password_enabled:
description:
- True if the template supports the password reset feature.
required: false
default: false
template_tag:
description:
- the tag for this template.
required: false
default: null
sshkey_enabled:
description:
- True if the template supports the sshkey upload feature.
required: false
default: false
is_routing:
description:
- True if the template type is routing i.e., if template is used to deploy router.
- Only considered if C(url) is used.
required: false
default: false
format:
description:
- The format for the template.
- Relevant when using C(state=present).
required: false
default: null
choices: [ 'QCOW2', 'RAW', 'VHD', 'OVA' ]
is_extractable:
description:
- True if the template or its derivatives are extractable.
required: false
default: false
details:
description:
- Template details in key/value pairs.
required: false
default: null
bits:
description:
- 32 or 64 bits support.
required: false
default: '64'
display_text:
description:
- Display text of the template.
required: false
default: null
state:
description:
- State of the template.
required: false
default: 'present'
choices: [ 'present', 'absent', 'extracted' ]
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Register a systemvm template
- local_action:
module: cs_template
name: systemvm-vmware-4.5
url: "http://packages.shapeblue.com/systemvmtemplate/4.5/systemvm64template-4.5-vmware.ova"
hypervisor: VMware
format: OVA
cross_zones: yes
os_type: Debian GNU/Linux 7(64-bit)
# Create a template from a stopped virtual machine's volume
- local_action:
module: cs_template
name: debian-base-template
vm: debian-base-vm
os_type: Debian GNU/Linux 7(64-bit)
zone: tokio-ix
password_enabled: yes
is_public: yes
# Create a template from a virtual machine's root volume snapshot
- local_action:
module: cs_template
name: debian-base-template
vm: debian-base-vm
snapshot: ROOT-233_2015061509114
os_type: Debian GNU/Linux 7(64-bit)
zone: tokio-ix
password_enabled: yes
is_public: yes
# Remove a template
- local_action:
module: cs_template
name: systemvm-4.2
cross_zones: yes
state: absent
'''
RETURN = '''
---
id:
description: UUID of the template.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
name:
description: Name of the template.
returned: success
type: string
sample: Debian 7 64-bit
display_text:
description: Display text of the template.
returned: success
type: string
sample: Debian 7.7 64-bit minimal 2015-03-19
checksum:
description: MD5 checksum of the template.
returned: success
type: string
sample: 0b31bccccb048d20b551f70830bb7ad0
status:
description: Status of the template.
returned: success
type: string
sample: Download Complete
is_ready:
description: True if the template is ready to be deployed from.
returned: success
type: boolean
sample: true
is_public:
description: True if the template is public.
returned: success
type: boolean
sample: true
is_featured:
description: True if the template is featured.
returned: success
type: boolean
sample: true
is_extractable:
description: True if the template is extractable.
returned: success
type: boolean
sample: true
format:
description: Format of the template.
returned: success
type: string
sample: OVA
os_type:
description: Type of the OS.
returned: success
type: string
sample: CentOS 6.5 (64-bit)
password_enabled:
description: True if the reset password feature is enabled, false otherwise.
returned: success
type: boolean
sample: false
sshkey_enabled:
description: true if template is sshkey enabled, false otherwise.
returned: success
type: boolean
sample: false
cross_zones:
description: true if the template is managed across all zones, false otherwise.
returned: success
type: boolean
sample: false
template_type:
description: Type of the template.
returned: success
type: string
sample: USER
created:
description: Date of registering.
returned: success
type: string
sample: 2015-03-29T14:57:06+0200
template_tag:
description: Template tag related to this template.
returned: success
type: string
sample: special
hypervisor:
description: Hypervisor related to this template.
returned: success
type: string
sample: VMware
mode:
description: Mode of extraction
returned: success
type: string
sample: http_download
state:
description: State of the extracted template
returned: success
type: string
sample: DOWNLOAD_URL_CREATED
url:
description: URL to which the template is extracted.
returned: success
type: string
sample: "http://1.2.3.4/userdata/eb307f13-4aca-45e8-b157-a414a14e6b04.ova"
tags:
description: List of resource tags associated with the template.
returned: success
type: dict
sample: '[ { "key": "foo", "value": "bar" } ]'
zone:
description: Name of zone the template is registered in.
returned: success
type: string
sample: zuerich
domain:
description: Domain the template is related to.
returned: success
type: string
sample: example domain
account:
description: Account the template is related to.
returned: success
type: string
sample: example account
project:
description: Name of project the template is related to.
returned: success
type: string
sample: Production
'''
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackTemplate(AnsibleCloudStack):
    """Manage CloudStack templates.

    Supports creating a template from a stopped VM's ROOT volume (or one of
    its snapshots), registering a template from a URL, extracting a template
    to a download URL, and removing a template.
    """

    def __init__(self, module):
        super(AnsibleCloudStackTemplate, self).__init__(module)
        # Map CloudStack API response keys to the keys returned by this module.
        self.returns = {
            'checksum': 'checksum',
            'status': 'status',
            'isready': 'is_ready',
            'templatetag': 'template_tag',
            'sshkeyenabled': 'sshkey_enabled',
            'passwordenabled': 'password_enabled',
            # Bug fix: the API response key is 'templatetype'; the previous
            # misspelling 'tempaltetype' meant template_type was never returned.
            'templatetype': 'template_type',
            'ostypename': 'os_type',
            # NOTE(review): mixed-case 'crossZones' kept as-is — confirm against
            # the listTemplates response of the targeted CloudStack version.
            'crossZones': 'cross_zones',
            'isextractable': 'is_extractable',
            'isfeatured': 'is_featured',
            'ispublic': 'is_public',
            'format': 'format',
            'hypervisor': 'hypervisor',
            'url': 'url',
            'extractMode': 'mode',
            'state': 'state',
        }

    def _get_args(self):
        """Collect the argument dict shared by createTemplate and registerTemplate.

        Fails the module if the mandatory os_type cannot be resolved.
        """
        args = {}
        args['name'] = self.module.params.get('name')
        args['displaytext'] = self.get_or_fallback('display_text', 'name')
        args['bits'] = self.module.params.get('bits')
        args['isdynamicallyscalable'] = self.module.params.get('is_dynamically_scalable')
        args['isextractable'] = self.module.params.get('is_extractable')
        args['isfeatured'] = self.module.params.get('is_featured')
        args['ispublic'] = self.module.params.get('is_public')
        args['passwordenabled'] = self.module.params.get('password_enabled')
        args['requireshvm'] = self.module.params.get('requires_hvm')
        args['templatetag'] = self.module.params.get('template_tag')
        args['ostypeid'] = self.get_os_type(key='id')
        if not args['ostypeid']:
            self.module.fail_json(msg="Missing required arguments: os_type")
        return args

    def get_root_volume(self, key=None):
        """Return the ROOT volume (or one field of it) of the configured VM.

        Fails the module when the VM has no ROOT volume.
        """
        args = {}
        args['account'] = self.get_account(key='name')
        args['domainid'] = self.get_domain(key='id')
        args['projectid'] = self.get_project(key='id')
        args['virtualmachineid'] = self.get_vm(key='id')
        args['type'] = "ROOT"
        volumes = self.cs.listVolumes(**args)
        if volumes:
            return self._get_by_key(key, volumes['volume'][0])
        self.module.fail_json(msg="Root volume for '%s' not found" % self.get_vm('name'))

    def get_snapshot(self, key=None):
        """Resolve the 'snapshot' param (name or UUID) against the VM's ROOT volume.

        Returns None when no snapshot was requested; fails the module when the
        requested snapshot does not exist.
        """
        snapshot = self.module.params.get('snapshot')
        if not snapshot:
            return None
        args = {}
        args['account'] = self.get_account(key='name')
        args['domainid'] = self.get_domain(key='id')
        args['projectid'] = self.get_project(key='id')
        args['volumeid'] = self.get_root_volume('id')
        snapshots = self.cs.listSnapshots(**args)
        if snapshots:
            for s in snapshots['snapshot']:
                # The param may hold either the snapshot name or its UUID.
                if snapshot in [ s['name'], s['id'] ]:
                    return self._get_by_key(key, s)
        self.module.fail_json(msg="Snapshot '%s' not found" % snapshot)

    def create_template(self):
        """Create a template from a VM ROOT volume or snapshot (idempotent)."""
        template = self.get_template()
        if not template:
            self.result['changed'] = True
            args = self._get_args()
            # Prefer the snapshot as source when one was given; otherwise use
            # the VM's ROOT volume directly.
            snapshot_id = self.get_snapshot(key='id')
            if snapshot_id:
                args['snapshotid'] = snapshot_id
            else:
                args['volumeid'] = self.get_root_volume('id')
            if not self.module.check_mode:
                template = self.cs.createTemplate(**args)
                if 'errortext' in template:
                    self.module.fail_json(msg="Failed: '%s'" % template['errortext'])
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    template = self.poll_job(template, 'template')
        return template

    def register_template(self):
        """Register a template from a URL (idempotent).

        Requires format, url and hypervisor params; fails early when missing.
        """
        required_params = [
            'format',
            'url',
            'hypervisor',
        ]
        self.module.fail_on_missing_params(required_params=required_params)
        template = self.get_template()
        if not template:
            self.result['changed'] = True
            args = self._get_args()
            args['url'] = self.module.params.get('url')
            args['format'] = self.module.params.get('format')
            args['checksum'] = self.module.params.get('checksum')
            args['isextractable'] = self.module.params.get('is_extractable')
            args['isrouting'] = self.module.params.get('is_routing')
            args['sshkeyenabled'] = self.module.params.get('sshkey_enabled')
            args['hypervisor'] = self.get_hypervisor()
            args['domainid'] = self.get_domain(key='id')
            args['account'] = self.get_account(key='name')
            args['projectid'] = self.get_project(key='id')
            if not self.module.params.get('cross_zones'):
                args['zoneid'] = self.get_zone(key='id')
            else:
                # zoneid=-1 registers the template in all zones (CloudStack API).
                args['zoneid'] = -1
            if not self.module.check_mode:
                res = self.cs.registerTemplate(**args)
                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
                template = res['template']
        return template

    def get_template(self):
        """Find an existing template, by checksum if given, otherwise by name.

        Returns the matching template dict or None.
        """
        args = {}
        args['isready'] = self.module.params.get('is_ready')
        args['templatefilter'] = self.module.params.get('template_filter')
        args['domainid'] = self.get_domain(key='id')
        args['account'] = self.get_account(key='name')
        args['projectid'] = self.get_project(key='id')
        if not self.module.params.get('cross_zones'):
            args['zoneid'] = self.get_zone(key='id')
        # If checksum is set, we only look on that.
        checksum = self.module.params.get('checksum')
        if not checksum:
            args['name'] = self.module.params.get('name')
        templates = self.cs.listTemplates(**args)
        if templates:
            # If checksum is set, we only look on that.
            if not checksum:
                return templates['template'][0]
            else:
                for i in templates['template']:
                    if 'checksum' in i and i['checksum'] == checksum:
                        return i
        return None

    def extract_template(self):
        """Extract an existing template to a download/upload URL."""
        template = self.get_template()
        if not template:
            self.module.fail_json(msg="Failed: template not found")
        args = {}
        args['id'] = template['id']
        args['url'] = self.module.params.get('url')
        args['mode'] = self.module.params.get('mode')
        args['zoneid'] = self.get_zone(key='id')
        # Extraction always triggers a (re-)extract job, hence always changed.
        self.result['changed'] = True
        if not self.module.check_mode:
            template = self.cs.extractTemplate(**args)
            if 'errortext' in template:
                self.module.fail_json(msg="Failed: '%s'" % template['errortext'])
            poll_async = self.module.params.get('poll_async')
            if poll_async:
                template = self.poll_job(template, 'template')
        return template

    def remove_template(self):
        """Delete a template if it exists (idempotent).

        Returns the pre-deletion template dict so its facts can be reported.
        """
        template = self.get_template()
        if template:
            self.result['changed'] = True
            args = {}
            args['id'] = template['id']
            if not self.module.params.get('cross_zones'):
                args['zoneid'] = self.get_zone(key='id')
            if not self.module.check_mode:
                res = self.cs.deleteTemplate(**args)
                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    res = self.poll_job(res, 'template')
        return template
def main():
    """Module entry point: build the argument spec and dispatch on 'state'."""
    # Start from the common CloudStack argument spec and add module-specific args.
    argument_spec = cs_argument_spec()
    argument_spec.update(dict(
        name = dict(required=True),
        display_text = dict(default=None),
        url = dict(default=None),
        vm = dict(default=None),
        snapshot = dict(default=None),
        os_type = dict(default=None),
        is_ready = dict(type='bool', default=False),
        is_public = dict(type='bool', default=True),
        is_featured = dict(type='bool', default=False),
        is_dynamically_scalable = dict(type='bool', default=False),
        is_extractable = dict(type='bool', default=False),
        is_routing = dict(type='bool', default=False),
        checksum = dict(default=None),
        template_filter = dict(default='self', choices=['featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community']),
        hypervisor = dict(choices=CS_HYPERVISORS, default=None),
        requires_hvm = dict(type='bool', default=False),
        password_enabled = dict(type='bool', default=False),
        template_tag = dict(default=None),
        sshkey_enabled = dict(type='bool', default=False),
        format = dict(choices=['QCOW2', 'RAW', 'VHD', 'OVA'], default=None),
        details = dict(default=None),
        bits = dict(type='int', choices=[ 32, 64 ], default=64),
        state = dict(choices=['present', 'absent', 'extracted'], default='present'),
        cross_zones = dict(type='bool', default=False),
        mode = dict(choices=['http_download', 'ftp_upload'], default='http_download'),
        zone = dict(default=None),
        domain = dict(default=None),
        account = dict(default=None),
        project = dict(default=None),
        poll_async = dict(type='bool', default=True),
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=cs_required_together(),
        # url/vm select different creation paths; zone conflicts with cross_zones.
        mutually_exclusive = (
            ['url', 'vm'],
            ['zone', 'cross_zones'],
        ),
        supports_check_mode=True
    )
    try:
        acs_tpl = AnsibleCloudStackTemplate(module)
        state = module.params.get('state')
        # Dispatch: absent -> delete, extracted -> extract, present -> register
        # (from url) or create (from vm); one of url/vm is required for present.
        if state in ['absent']:
            tpl = acs_tpl.remove_template()
        elif state in ['extracted']:
            tpl = acs_tpl.extract_template()
        else:
            if module.params.get('url'):
                tpl = acs_tpl.register_template()
            elif module.params.get('vm'):
                tpl = acs_tpl.create_template()
            else:
                module.fail_json(msg="one of the following is required on state=present: url,vm")
        result = acs_tpl.get_result(tpl)
    except CloudStackException as e:
        module.fail_json(msg='CloudStackException: %s' % str(e))
    module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
ryokochang/Slab-GCS | packages/IronPython.StdLib.2.7.5-beta1/content/Lib/uuid.py | 187 | 21095 | r"""UUID objects (universally unique identifiers) according to RFC 4122.
This module provides immutable UUID objects (class UUID) and the functions
uuid1(), uuid3(), uuid4(), uuid5() for generating version 1, 3, 4, and 5
UUIDs as specified in RFC 4122.
If all you want is a unique ID, you should probably call uuid1() or uuid4().
Note that uuid1() may compromise privacy since it creates a UUID containing
the computer's network address. uuid4() creates a random UUID.
Typical usage:
>>> import uuid
# make a UUID based on the host ID and current time
>>> uuid.uuid1()
UUID('a8098c1a-f86e-11da-bd1a-00112444be1e')
# make a UUID using an MD5 hash of a namespace UUID and a name
>>> uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org')
UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e')
# make a random UUID
>>> uuid.uuid4()
UUID('16fd2706-8baf-433b-82eb-8c7fada847da')
# make a UUID using a SHA-1 hash of a namespace UUID and a name
>>> uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org')
UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d')
# make a UUID from a string of hex digits (braces and hyphens ignored)
>>> x = uuid.UUID('{00010203-0405-0607-0809-0a0b0c0d0e0f}')
# convert a UUID to a string of hex digits in standard form
>>> str(x)
'00010203-0405-0607-0809-0a0b0c0d0e0f'
# get the raw 16 bytes of the UUID
>>> x.bytes
'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
# make a UUID from a 16-byte string
>>> uuid.UUID(bytes=x.bytes)
UUID('00010203-0405-0607-0809-0a0b0c0d0e0f')
"""
__author__ = 'Ka-Ping Yee <ping@zesty.ca>'
RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [
'reserved for NCS compatibility', 'specified in RFC 4122',
'reserved for Microsoft compatibility', 'reserved for future definition']
class UUID(object):
    """Instances of the UUID class represent UUIDs as specified in RFC 4122.

    UUID objects are immutable, hashable, and usable as dictionary keys.
    Converting a UUID to a string with str() yields something in the form
    '12345678-1234-1234-1234-123456789abc'. The UUID constructor accepts
    five possible forms: a similar string of hexadecimal digits, or a tuple
    of six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and
    48-bit values respectively) as an argument named 'fields', or a string
    of 16 bytes (with all the integer fields in big-endian order) as an
    argument named 'bytes', or a string of 16 bytes (with the first three
    fields in little-endian order) as an argument named 'bytes_le', or a
    single 128-bit integer as an argument named 'int'.

    UUIDs have these read-only attributes:

        bytes       the UUID as a 16-byte string (containing the six
                    integer fields in big-endian byte order)
        bytes_le    the UUID as a 16-byte string (with time_low, time_mid,
                    and time_hi_version in little-endian byte order)
        fields      a tuple of the six integer fields of the UUID,
                    which are also available as six individual attributes
                    and two derived attributes:
            time_low                the first 32 bits of the UUID
            time_mid                the next 16 bits of the UUID
            time_hi_version         the next 16 bits of the UUID
            clock_seq_hi_variant    the next 8 bits of the UUID
            clock_seq_low           the next 8 bits of the UUID
            node                    the last 48 bits of the UUID
            time                    the 60-bit timestamp
            clock_seq               the 14-bit sequence number
        hex         the UUID as a 32-character hexadecimal string
        int         the UUID as a 128-bit integer
        urn         the UUID as a URN as specified in RFC 4122
        variant     the UUID variant (one of the constants RESERVED_NCS,
                    RFC_4122, RESERVED_MICROSOFT, or RESERVED_FUTURE)
        version     the UUID version number (1 through 5, meaningful only
                    when the variant is RFC_4122)
    """

    def __init__(self, hex=None, bytes=None, bytes_le=None, fields=None,
                       int=None, version=None):
        r"""Create a UUID from either a string of 32 hexadecimal digits,
        a string of 16 bytes as the 'bytes' argument, a string of 16 bytes
        in little-endian order as the 'bytes_le' argument, a tuple of six
        integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version,
        8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as
        the 'fields' argument, or a single 128-bit integer as the 'int'
        argument.  When a string of hex digits is given, curly braces,
        hyphens, and a URN prefix are all optional.  For example, these
        expressions all yield the same UUID:

        UUID('{12345678-1234-5678-1234-567812345678}')
        UUID('12345678123456781234567812345678')
        UUID('urn:uuid:12345678-1234-5678-1234-567812345678')
        UUID(bytes='\x12\x34\x56\x78'*4)
        UUID(bytes_le='\x78\x56\x34\x12\x34\x12\x78\x56' +
                      '\x12\x34\x56\x78\x12\x34\x56\x78')
        UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678))
        UUID(int=0x12345678123456781234567812345678)

        Exactly one of 'hex', 'bytes', 'bytes_le', 'fields', or 'int' must
        be given.  The 'version' argument is optional; if given, the resulting
        UUID will have its variant and version set according to RFC 4122,
        overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'.
        """
        # Exactly one of the five representations must be supplied.
        if [hex, bytes, bytes_le, fields, int].count(None) != 4:
            raise TypeError('need one of hex, bytes, bytes_le, fields, or int')
        if hex is not None:
            # Strip the optional URN prefix, braces and hyphens first.
            hex = hex.replace('urn:', '').replace('uuid:', '')
            hex = hex.strip('{}').replace('-', '')
            if len(hex) != 32:
                raise ValueError('badly formed hexadecimal UUID string')
            int = long(hex, 16)
        if bytes_le is not None:
            if len(bytes_le) != 16:
                raise ValueError('bytes_le is not a 16-char string')
            # Byte-swap the first three fields into big-endian order.
            bytes = (bytes_le[3] + bytes_le[2] + bytes_le[1] + bytes_le[0] +
                     bytes_le[5] + bytes_le[4] + bytes_le[7] + bytes_le[6] +
                     bytes_le[8:])
        if bytes is not None:
            if len(bytes) != 16:
                raise ValueError('bytes is not a 16-char string')
            int = long(('%02x'*16) % tuple(map(ord, bytes)), 16)
        if fields is not None:
            if len(fields) != 6:
                raise ValueError('fields is not a 6-tuple')
            (time_low, time_mid, time_hi_version,
             clock_seq_hi_variant, clock_seq_low, node) = fields
            # Range-check each field against its bit width.
            if not 0 <= time_low < 1<<32L:
                raise ValueError('field 1 out of range (need a 32-bit value)')
            if not 0 <= time_mid < 1<<16L:
                raise ValueError('field 2 out of range (need a 16-bit value)')
            if not 0 <= time_hi_version < 1<<16L:
                raise ValueError('field 3 out of range (need a 16-bit value)')
            if not 0 <= clock_seq_hi_variant < 1<<8L:
                raise ValueError('field 4 out of range (need an 8-bit value)')
            if not 0 <= clock_seq_low < 1<<8L:
                raise ValueError('field 5 out of range (need an 8-bit value)')
            if not 0 <= node < 1<<48L:
                raise ValueError('field 6 out of range (need a 48-bit value)')
            # Pack the six fields into one 128-bit integer.
            clock_seq = (clock_seq_hi_variant << 8L) | clock_seq_low
            int = ((time_low << 96L) | (time_mid << 80L) |
                   (time_hi_version << 64L) | (clock_seq << 48L) | node)
        if int is not None:
            if not 0 <= int < 1<<128L:
                raise ValueError('int is out of range (need a 128-bit value)')
        if version is not None:
            if not 1 <= version <= 5:
                raise ValueError('illegal version number')
            # Set the variant to RFC 4122.
            int &= ~(0xc000 << 48L)
            int |= 0x8000 << 48L
            # Set the version number.
            int &= ~(0xf000 << 64L)
            int |= version << 76L
        # Bypass the immutable __setattr__ below; 'int' is the only state.
        self.__dict__['int'] = int

    def __cmp__(self, other):
        # Python 2 comparison protocol: UUIDs order by their integer value.
        if isinstance(other, UUID):
            return cmp(self.int, other.int)
        return NotImplemented

    def __hash__(self):
        return hash(self.int)

    def __int__(self):
        return self.int

    def __repr__(self):
        return 'UUID(%r)' % str(self)

    def __setattr__(self, name, value):
        # Enforce immutability so UUIDs are safe as dictionary keys.
        raise TypeError('UUID objects are immutable')

    def __str__(self):
        hex = '%032x' % self.int
        return '%s-%s-%s-%s-%s' % (
            hex[:8], hex[8:12], hex[12:16], hex[16:20], hex[20:])

    def get_bytes(self):
        # Serialize the 128-bit integer to 16 big-endian bytes.
        bytes = ''
        for shift in range(0, 128, 8):
            bytes = chr((self.int >> shift) & 0xff) + bytes
        return bytes

    bytes = property(get_bytes)

    def get_bytes_le(self):
        # Little-endian layout only swaps the first three fields.
        bytes = self.bytes
        return (bytes[3] + bytes[2] + bytes[1] + bytes[0] +
                bytes[5] + bytes[4] + bytes[7] + bytes[6] + bytes[8:])

    bytes_le = property(get_bytes_le)

    def get_fields(self):
        return (self.time_low, self.time_mid, self.time_hi_version,
                self.clock_seq_hi_variant, self.clock_seq_low, self.node)

    fields = property(get_fields)

    def get_time_low(self):
        return self.int >> 96L

    time_low = property(get_time_low)

    def get_time_mid(self):
        return (self.int >> 80L) & 0xffff

    time_mid = property(get_time_mid)

    def get_time_hi_version(self):
        return (self.int >> 64L) & 0xffff

    time_hi_version = property(get_time_hi_version)

    def get_clock_seq_hi_variant(self):
        return (self.int >> 56L) & 0xff

    clock_seq_hi_variant = property(get_clock_seq_hi_variant)

    def get_clock_seq_low(self):
        return (self.int >> 48L) & 0xff

    clock_seq_low = property(get_clock_seq_low)

    def get_time(self):
        # Reassemble the 60-bit timestamp from its three fields.
        return (((self.time_hi_version & 0x0fffL) << 48L) |
                (self.time_mid << 32L) | self.time_low)

    time = property(get_time)

    def get_clock_seq(self):
        # Reassemble the 14-bit clock sequence (variant bits masked off).
        return (((self.clock_seq_hi_variant & 0x3fL) << 8L) |
                self.clock_seq_low)

    clock_seq = property(get_clock_seq)

    def get_node(self):
        return self.int & 0xffffffffffff

    node = property(get_node)

    def get_hex(self):
        return '%032x' % self.int

    hex = property(get_hex)

    def get_urn(self):
        return 'urn:uuid:' + str(self)

    urn = property(get_urn)

    def get_variant(self):
        # The variant is encoded in the top bits of clock_seq_hi_variant
        # (octet 8), per RFC 4122 section 4.1.1.
        if not self.int & (0x8000 << 48L):
            return RESERVED_NCS
        elif not self.int & (0x4000 << 48L):
            return RFC_4122
        elif not self.int & (0x2000 << 48L):
            return RESERVED_MICROSOFT
        else:
            return RESERVED_FUTURE

    variant = property(get_variant)

    def get_version(self):
        # The version bits are only meaningful for RFC 4122 UUIDs.
        if self.variant == RFC_4122:
            return int((self.int >> 76L) & 0xf)

    version = property(get_version)
def _find_mac(command, args, hw_identifiers, get_index):
    """Run *command* with *args* and scan its output for a hardware address.

    hw_identifiers are lowercase keywords (e.g. 'hwaddr', 'ether') that mark
    the MAC address in the output; get_index maps the keyword's word index to
    the index of the word holding the address itself.  Returns the MAC as an
    int, or None if the command is unavailable or yields nothing.
    """
    import os
    # Try the bare name (rely on $PATH) and the usual sbin locations.
    for dir in ['', '/sbin/', '/usr/sbin']:
        executable = os.path.join(dir, command)
        if not os.path.exists(executable):
            continue
        try:
            # LC_ALL to get English output, 2>/dev/null to
            # prevent output on stderr
            cmd = 'LC_ALL=C %s %s 2>/dev/null' % (executable, args)
            with os.popen(cmd) as pipe:
                for line in pipe:
                    words = line.lower().split()
                    for i in range(len(words)):
                        if words[i] in hw_identifiers:
                            return int(
                                words[get_index(i)].replace(':', ''), 16)
        except IOError:
            continue
    return None
def _ifconfig_getnode():
    """Get the hardware address on Unix by running ifconfig."""
    # This works on Linux ('' or '-a'), Tru64 ('-av'), but not all Unixes.
    for args in ('', '-a', '-av'):
        mac = _find_mac('ifconfig', args, ['hwaddr', 'ether'], lambda i: i+1)
        if mac:
            return mac
    import socket
    ip_addr = socket.gethostbyname(socket.gethostname())
    # Try getting the MAC addr from arp based on our IP address (Solaris).
    mac = _find_mac('arp', '-an', [ip_addr], lambda i: -1)
    if mac:
        return mac
    # This might work on HP-UX.
    mac = _find_mac('lanscan', '-ai', ['lan0'], lambda i: 0)
    if mac:
        return mac
    # All probes failed; caller (getnode) falls back to the next getter.
    return None
def _ipconfig_getnode():
    """Get the hardware address on Windows by running ipconfig.exe."""
    import os, re
    dirs = ['', r'c:\windows\system32', r'c:\winnt\system32']
    try:
        import ctypes
        buffer = ctypes.create_string_buffer(300)
        # Prefer the real system directory, which handles non-default installs.
        ctypes.windll.kernel32.GetSystemDirectoryA(buffer, 300)
        dirs.insert(0, buffer.value.decode('mbcs'))
    except:
        pass
    for dir in dirs:
        try:
            pipe = os.popen(os.path.join(dir, 'ipconfig') + ' /all')
        except IOError:
            continue
        else:
            for line in pipe:
                value = line.split(':')[-1].strip().lower()
                # Physical addresses look like '00-1a-2b-3c-4d-5e'.
                if re.match('([0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]', value):
                    return int(value.replace('-', ''), 16)
        # NOTE(review): if os.popen raises before 'pipe' is bound, this
        # finally clause raises NameError — matches upstream CPython 2.7.
        finally:
            pipe.close()
def _netbios_getnode():
    """Get the hardware address on Windows using NetBIOS calls.
    See http://support.microsoft.com/kb/118623 for details."""
    import win32wnet, netbios
    # Enumerate the available LAN adapters.
    ncb = netbios.NCB()
    ncb.Command = netbios.NCBENUM
    ncb.Buffer = adapters = netbios.LANA_ENUM()
    adapters._pack()
    if win32wnet.Netbios(ncb) != 0:
        return
    adapters._unpack()
    for i in range(adapters.length):
        # Reset the adapter, then query its status block for the MAC.
        ncb.Reset()
        ncb.Command = netbios.NCBRESET
        ncb.Lana_num = ord(adapters.lana[i])
        if win32wnet.Netbios(ncb) != 0:
            continue
        ncb.Reset()
        ncb.Command = netbios.NCBASTAT
        ncb.Lana_num = ord(adapters.lana[i])
        ncb.Callname = '*'.ljust(16)
        ncb.Buffer = status = netbios.ADAPTER_STATUS()
        if win32wnet.Netbios(ncb) != 0:
            continue
        status._unpack()
        # Fold the six address bytes into a single 48-bit integer.
        bytes = map(ord, status.adapter_address)
        return ((bytes[0]<<40L) + (bytes[1]<<32L) + (bytes[2]<<24L) +
                (bytes[3]<<16L) + (bytes[4]<<8L) + bytes[5])
# Thanks to Thomas Heller for ctypes and for his help with its use here.

# If ctypes is available, use it to find system routines for UUID generation.
# On failure of any kind the three hooks simply stay None and the pure-Python
# fallbacks are used instead.
_uuid_generate_random = _uuid_generate_time = _UuidCreate = None
try:
    import ctypes, ctypes.util

    # The uuid_generate_* routines are provided by libuuid on at least
    # Linux and FreeBSD, and provided by libc on Mac OS X.
    for libname in ['uuid', 'c']:
        try:
            lib = ctypes.CDLL(ctypes.util.find_library(libname))
        except:
            continue
        if hasattr(lib, 'uuid_generate_random'):
            _uuid_generate_random = lib.uuid_generate_random
        if hasattr(lib, 'uuid_generate_time'):
            _uuid_generate_time = lib.uuid_generate_time

    # The uuid_generate_* functions are broken on MacOS X 10.5, as noted
    # in issue #8621 the function generates the same sequence of values
    # in the parent process and all children created using fork (unless
    # those children use exec as well).
    #
    # Assume that the uuid_generate functions are broken from 10.5 onward,
    # the test can be adjusted when a later version is fixed.
    import sys
    if sys.platform == 'darwin':
        import os
        # Darwin kernel version 9 corresponds to Mac OS X 10.5 (Leopard).
        if int(os.uname()[2].split('.')[0]) >= 9:
            _uuid_generate_random = _uuid_generate_time = None

    # On Windows prior to 2000, UuidCreate gives a UUID containing the
    # hardware address.  On Windows 2000 and later, UuidCreate makes a
    # random UUID and UuidCreateSequential gives a UUID containing the
    # hardware address.  These routines are provided by the RPC runtime.
    # NOTE:  at least on Tim's WinXP Pro SP2 desktop box, while the last
    # 6 bytes returned by UuidCreateSequential are fixed, they don't appear
    # to bear any relationship to the MAC address of any network device
    # on the box.
    try:
        lib = ctypes.windll.rpcrt4
    except:
        lib = None
    _UuidCreate = getattr(lib, 'UuidCreateSequential',
                          getattr(lib, 'UuidCreate', None))
except:
    pass
def _unixdll_getnode():
    """Get the hardware address on Unix using ctypes."""
    # uuid_generate_time embeds the MAC address in the version-1 UUID it makes.
    _buffer = ctypes.create_string_buffer(16)
    _uuid_generate_time(_buffer)
    return UUID(bytes=_buffer.raw).node
def _windll_getnode():
    """Get the hardware address on Windows using ctypes."""
    _buffer = ctypes.create_string_buffer(16)
    # A zero return code (RPC_S_OK) means the buffer holds a valid UUID.
    if _UuidCreate(_buffer) == 0:
        return UUID(bytes=_buffer.raw).node
def _random_getnode():
"""Get a random node ID, with eighth bit set as suggested by RFC 4122."""
import random
return random.randrange(0, 1<<48L) | 0x010000000000L
_node = None
def getnode():
    """Get the hardware address as a 48-bit positive integer.

    The first time this runs, it may launch a separate program, which could
    be quite slow.  If all attempts to obtain the hardware address fail, we
    choose a random 48-bit number with its eighth bit set to 1 as recommended
    in RFC 4122.
    """
    global _node
    if _node is not None:
        # Cached from an earlier call; the node ID is stable per process.
        return _node
    import sys
    if sys.platform == 'win32':
        getters = [_windll_getnode, _netbios_getnode, _ipconfig_getnode]
    else:
        getters = [_unixdll_getnode, _ifconfig_getnode]
    # _random_getnode always succeeds, so this loop is guaranteed to return.
    for getter in getters + [_random_getnode]:
        try:
            _node = getter()
        except:
            continue
        if _node is not None:
            return _node
_last_timestamp = None
def uuid1(node=None, clock_seq=None):
    """Generate a UUID from a host ID, sequence number, and the current time.
    If 'node' is not given, getnode() is used to obtain the hardware
    address.  If 'clock_seq' is given, it is used as the sequence number;
    otherwise a random 14-bit sequence number is chosen."""
    # When the system provides a version-1 UUID generator, use it (but don't
    # use UuidCreate here because its UUIDs don't conform to RFC 4122).
    if _uuid_generate_time and node is clock_seq is None:
        _buffer = ctypes.create_string_buffer(16)
        _uuid_generate_time(_buffer)
        return UUID(bytes=_buffer.raw)
    global _last_timestamp
    import time
    nanoseconds = int(time.time() * 1e9)
    # 0x01b21dd213814000 is the number of 100-ns intervals between the
    # UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00.
    timestamp = int(nanoseconds//100) + 0x01b21dd213814000L
    # Keep timestamps strictly increasing within this process, in place of
    # the stable storage RFC 4122 calls for.
    if _last_timestamp is not None and timestamp <= _last_timestamp:
        timestamp = _last_timestamp + 1
    _last_timestamp = timestamp
    if clock_seq is None:
        import random
        clock_seq = random.randrange(1<<14L) # instead of stable storage
    # Split the 60-bit timestamp and 14-bit clock sequence into RFC 4122 fields.
    time_low = timestamp & 0xffffffffL
    time_mid = (timestamp >> 32L) & 0xffffL
    time_hi_version = (timestamp >> 48L) & 0x0fffL
    clock_seq_low = clock_seq & 0xffL
    clock_seq_hi_variant = (clock_seq >> 8L) & 0x3fL
    if node is None:
        node = getnode()
    return UUID(fields=(time_low, time_mid, time_hi_version,
                        clock_seq_hi_variant, clock_seq_low, node), version=1)
def uuid3(namespace, name):
    """Generate a UUID from the MD5 hash of a namespace UUID and a name."""
    from hashlib import md5
    hash = md5(namespace.bytes + name).digest()
    # An MD5 digest is exactly 16 bytes; the slice is defensive.
    return UUID(bytes=hash[:16], version=3)
def uuid4():
    """Generate a random UUID."""
    # When the system provides a version-4 UUID generator, use it.
    if _uuid_generate_random:
        _buffer = ctypes.create_string_buffer(16)
        _uuid_generate_random(_buffer)
        return UUID(bytes=_buffer.raw)
    # Otherwise, get randomness from urandom or the 'random' module.
    try:
        import os
        return UUID(bytes=os.urandom(16), version=4)
    except:
        # os.urandom is unavailable on some platforms; fall back to the
        # (non-cryptographic) random module.
        import random
        bytes = [chr(random.randrange(256)) for i in range(16)]
        return UUID(bytes=bytes, version=4)
def uuid5(namespace, name):
    """Generate a UUID from the SHA-1 hash of a namespace UUID and a name."""
    from hashlib import sha1
    hash = sha1(namespace.bytes + name).digest()
    # SHA-1 digests are 20 bytes; only the first 16 form the UUID.
    return UUID(bytes=hash[:16], version=5)
# The following standard UUIDs are for use with uuid3() or uuid5().
# These are the well-known namespace IDs from RFC 4122 Appendix C, for
# names drawn from DNS, URLs, ISO OIDs, and X.500 distinguished names.
NAMESPACE_DNS = UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_URL = UUID('6ba7b811-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_OID = UUID('6ba7b812-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_X500 = UUID('6ba7b814-9dad-11d1-80b4-00c04fd430c8')
| gpl-3.0 |
josephmario/CodeIgniter | public/plugins/ionicons/builder/scripts/eotlitetool.py | 374 | 17505 | #!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is font utility code.
#
# The Initial Developer of the Original Code is Mozilla Corporation.
# Portions created by the Initial Developer are Copyright (C) 2009
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# John Daggett <jdaggett@mozilla.com>
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK ***** */
# eotlitetool.py - create EOT version of OpenType font for use with IE
#
# Usage: eotlitetool.py [-o output-filename] font1 [font2 ...]
#
# OpenType file structure
# http://www.microsoft.com/typography/otspec/otff.htm
#
# Types:
#
# BYTE 8-bit unsigned integer.
# CHAR 8-bit signed integer.
# USHORT 16-bit unsigned integer.
# SHORT 16-bit signed integer.
# ULONG 32-bit unsigned integer.
# Fixed 32-bit signed fixed-point number (16.16)
# LONGDATETIME Date represented in number of seconds since 12:00 midnight, January 1, 1904. The value is represented as a signed 64-bit integer.
#
# SFNT Header
#
# Fixed sfnt version // 0x00010000 for version 1.0.
# USHORT numTables // Number of tables.
# USHORT searchRange // (Maximum power of 2 <= numTables) x 16.
# USHORT entrySelector // Log2(maximum power of 2 <= numTables).
# USHORT rangeShift // NumTables x 16-searchRange.
#
# Table Directory
#
# ULONG tag // 4-byte identifier.
# ULONG checkSum // CheckSum for this table.
# ULONG offset // Offset from beginning of TrueType font file.
# ULONG length // Length of this table.
#
# OS/2 Table (Version 4)
#
# USHORT version // 0x0004
# SHORT xAvgCharWidth
# USHORT usWeightClass
# USHORT usWidthClass
# USHORT fsType
# SHORT ySubscriptXSize
# SHORT ySubscriptYSize
# SHORT ySubscriptXOffset
# SHORT ySubscriptYOffset
# SHORT ySuperscriptXSize
# SHORT ySuperscriptYSize
# SHORT ySuperscriptXOffset
# SHORT ySuperscriptYOffset
# SHORT yStrikeoutSize
# SHORT yStrikeoutPosition
# SHORT sFamilyClass
# BYTE panose[10]
# ULONG ulUnicodeRange1 // Bits 0-31
# ULONG ulUnicodeRange2 // Bits 32-63
# ULONG ulUnicodeRange3 // Bits 64-95
# ULONG ulUnicodeRange4 // Bits 96-127
# CHAR achVendID[4]
# USHORT fsSelection
# USHORT usFirstCharIndex
# USHORT usLastCharIndex
# SHORT sTypoAscender
# SHORT sTypoDescender
# SHORT sTypoLineGap
# USHORT usWinAscent
# USHORT usWinDescent
# ULONG ulCodePageRange1 // Bits 0-31
# ULONG ulCodePageRange2 // Bits 32-63
# SHORT sxHeight
# SHORT sCapHeight
# USHORT usDefaultChar
# USHORT usBreakChar
# USHORT usMaxContext
#
#
# The Naming Table is organized as follows:
#
# [name table header]
# [name records]
# [string data]
#
# Name Table Header
#
# USHORT format // Format selector (=0).
# USHORT count // Number of name records.
# USHORT stringOffset // Offset to start of string storage (from start of table).
#
# Name Record
#
# USHORT platformID // Platform ID.
# USHORT encodingID // Platform-specific encoding ID.
# USHORT languageID // Language ID.
# USHORT nameID // Name ID.
# USHORT length // String length (in bytes).
# USHORT offset // String offset from start of storage area (in bytes).
#
# head Table
#
# Fixed tableVersion // Table version number 0x00010000 for version 1.0.
# Fixed fontRevision // Set by font manufacturer.
# ULONG checkSumAdjustment // To compute: set it to 0, sum the entire font as ULONG, then store 0xB1B0AFBA - sum.
# ULONG magicNumber // Set to 0x5F0F3CF5.
# USHORT flags
# USHORT unitsPerEm // Valid range is from 16 to 16384. This value should be a power of 2 for fonts that have TrueType outlines.
# LONGDATETIME created // Number of seconds since 12:00 midnight, January 1, 1904. 64-bit integer
# LONGDATETIME modified // Number of seconds since 12:00 midnight, January 1, 1904. 64-bit integer
# SHORT xMin // For all glyph bounding boxes.
# SHORT yMin
# SHORT xMax
# SHORT yMax
# USHORT macStyle
# USHORT lowestRecPPEM // Smallest readable size in pixels.
# SHORT fontDirectionHint
# SHORT indexToLocFormat // 0 for short offsets, 1 for long.
# SHORT glyphDataFormat // 0 for current format.
#
#
#
# Embedded OpenType (EOT) file format
# http://www.w3.org/Submission/EOT/
#
# EOT version 0x00020001
#
# An EOT font consists of a header with the original OpenType font
# appended at the end. Most of the data in the EOT header is simply a
# copy of data from specific tables within the font data. The exceptions
# are the 'Flags' field and the root string name field. The root string
# is a set of names indicating domains for which the font data can be
# used. A null root string implies the font data can be used anywhere.
# The EOT header is in little-endian byte order but the font data remains
# in big-endian order as specified by the OpenType spec.
#
# Overall structure:
#
# [EOT header]
# [EOT name records]
# [font data]
#
# EOT header
#
# ULONG eotSize // Total structure length in bytes (including string and font data)
# ULONG fontDataSize // Length of the OpenType font (FontData) in bytes
# ULONG version // Version number of this format - 0x00020001
# ULONG flags // Processing Flags (0 == no special processing)
# BYTE fontPANOSE[10] // OS/2 Table panose
# BYTE charset // DEFAULT_CHARSET (0x01)
# BYTE italic // 0x01 if ITALIC in OS/2 Table fsSelection is set, 0 otherwise
# ULONG weight // OS/2 Table usWeightClass
# USHORT fsType // OS/2 Table fsType (specifies embedding permission flags)
# USHORT magicNumber // Magic number for EOT file - 0x504C.
# ULONG unicodeRange1 // OS/2 Table ulUnicodeRange1
# ULONG unicodeRange2 // OS/2 Table ulUnicodeRange2
# ULONG unicodeRange3 // OS/2 Table ulUnicodeRange3
# ULONG unicodeRange4 // OS/2 Table ulUnicodeRange4
# ULONG codePageRange1 // OS/2 Table ulCodePageRange1
# ULONG codePageRange2 // OS/2 Table ulCodePageRange2
# ULONG checkSumAdjustment // head Table CheckSumAdjustment
# ULONG reserved[4] // Reserved - must be 0
# USHORT padding1 // Padding - must be 0
#
# EOT name records
#
# USHORT FamilyNameSize // Font family name size in bytes
# BYTE FamilyName[FamilyNameSize] // Font family name (name ID = 1), little-endian UTF-16
# USHORT Padding2 // Padding - must be 0
#
# USHORT StyleNameSize // Style name size in bytes
# BYTE StyleName[StyleNameSize] // Style name (name ID = 2), little-endian UTF-16
# USHORT Padding3 // Padding - must be 0
#
# USHORT VersionNameSize // Version name size in bytes
# bytes VersionName[VersionNameSize] // Version name (name ID = 5), little-endian UTF-16
# USHORT Padding4 // Padding - must be 0
#
# USHORT FullNameSize // Full name size in bytes
# BYTE FullName[FullNameSize] // Full name (name ID = 4), little-endian UTF-16
# USHORT Padding5 // Padding - must be 0
#
# USHORT RootStringSize // Root string size in bytes
# BYTE RootString[RootStringSize] // Root string, little-endian UTF-16
import optparse
import os
import struct
class FontError(Exception):
    """Raised when font data is malformed, truncated, or unsupported."""
def multichar(tag):
    """Pack the first four characters of *tag* into a big-endian 32-bit int.

    Replaces the struct-based version, which shadowed the builtin name
    'str'; using ord() on the characters gives the same value for 4-char
    table-tag strings on both Python 2 and Python 3.
    """
    vals = [ord(c) for c in tag[:4]]
    return (vals[0] << 24) + (vals[1] << 16) + (vals[2] << 8) + vals[3]
def multicharval(v):
    """Unpack a 32-bit tag value into its four bytes, big-endian order."""
    octets = [(v >> shift) & 0xFF for shift in (24, 16, 8, 0)]
    return struct.pack('4B', *octets)
class EOT:
    """Constants describing the Embedded OpenType (EOT) header layout."""
    EOT_VERSION = 0x00020001
    EOT_MAGIC_NUMBER = 0x504c
    EOT_DEFAULT_CHARSET = 0x01
    EOT_FAMILY_NAME_INDEX = 0 # order of names in variable portion of EOT header
    EOT_STYLE_NAME_INDEX = 1
    EOT_VERSION_NAME_INDEX = 2
    EOT_FULL_NAME_INDEX = 3
    EOT_NUM_NAMES = 4
    # Little-endian struct format for the fixed EOT header fields (see the
    # header comment above): eotSize/fontDataSize/version/flags (4L),
    # panose (10B), charset+italic (2B), weight (L), fsType+magicNumber
    # (2H), unicode/codepage ranges + checkSumAdjustment (7L), and 18 pad
    # bytes covering reserved[4] plus padding1.
    EOT_HEADER_PACK = '<4L10B2BL2H7L18x'
class OpenType:
    """Constants and struct formats for parsing OpenType/TrueType fonts."""
    SFNT_CFF = multichar('OTTO') # Postscript CFF SFNT version
    SFNT_TRUE = 0x10000 # Standard TrueType version
    SFNT_APPLE = multichar('true') # Apple TrueType version
    # Big-endian unpack formats for the SFNT header and each 16-byte
    # table-directory entry (tag, checkSum, offset, length).
    SFNT_UNPACK = '>I4H'
    TABLE_DIR_UNPACK = '>4I'
    TABLE_HEAD = multichar('head') # TrueType table tags
    TABLE_NAME = multichar('name')
    TABLE_OS2 = multichar('OS/2')
    TABLE_GLYF = multichar('glyf')
    TABLE_CFF = multichar('CFF ')
    OS2_FSSELECTION_ITALIC = 0x1
    # Pulls weight, fsType, panose[10], unicode ranges[4], fsSelection and
    # codepage ranges[2] out of the OS/2 table, skipping the rest.
    OS2_UNPACK = '>4xH2xH22x10B4L4xH14x2L'
    # Only checkSumAdjustment is needed from the head table.
    HEAD_UNPACK = '>8xL'
    NAME_RECORD_UNPACK = '>6H'
    NAME_ID_FAMILY = 1
    NAME_ID_STYLE = 2
    NAME_ID_UNIQUE = 3
    NAME_ID_FULL = 4
    NAME_ID_VERSION = 5
    NAME_ID_POSTSCRIPT = 6
    PLATFORM_ID_UNICODE = 0 # Mac OS uses this typically
    PLATFORM_ID_MICROSOFT = 3
    ENCODING_ID_MICROSOFT_UNICODEBMP = 1 # with Microsoft platformID BMP-only Unicode encoding
    LANG_ID_MICROSOFT_EN_US = 0x0409 # with Microsoft platformID EN US lang code
def eotname(ttf):
    """Return the output filename for font file *ttf*: its extension
    replaced (or, if it has none, appended) with '.eotlite'.

    Uses os.path.splitext so a dot inside a directory component is not
    mistaken for a filename extension (the previous rfind('.') was
    path-unaware and would truncate e.g. 'my.fonts/arial' to 'my').
    """
    root, _ext = os.path.splitext(ttf)
    return root + '.eotlite'
def readfont(f):
    """Read font file *f* and return its entire contents as a byte string.

    The file handle is now closed deterministically; the original left it
    open until garbage collection.
    """
    with open(f, 'rb') as fontfile:
        return fontfile.read()
def get_table_directory(data):
    """read the SFNT header and table directory

    Layout: SFNT header (version, numTables, binary-search helpers)
    followed by numTables 16-byte entries (tag, checksum, offset, length).
    Returns {'version': ..., 'numTables': ..., 'tableDir': {tag ->
    {'offset', 'length', 'checksum'}}}.  Raises FontError on truncated
    data or an unsupported SFNT flavour.
    """
    datalen = len(data)
    sfntsize = struct.calcsize(OpenType.SFNT_UNPACK)
    if sfntsize > datalen:
        raise FontError, 'truncated font data'
    sfntvers, numTables = struct.unpack(OpenType.SFNT_UNPACK, data[:sfntsize])[:2]
    # Only CFF ('OTTO') and standard TrueType (0x10000) flavours are
    # accepted; Apple 'true' fonts are rejected here.
    if sfntvers != OpenType.SFNT_CFF and sfntvers != OpenType.SFNT_TRUE:
        raise FontError, 'invalid font type';
    font = {}
    font['version'] = sfntvers
    font['numTables'] = numTables
    # create set of offsets, lengths for tables
    table_dir_size = struct.calcsize(OpenType.TABLE_DIR_UNPACK)
    if sfntsize + table_dir_size * numTables > datalen:
        raise FontError, 'truncated font data, table directory extends past end of data'
    table_dir = {}
    for i in range(0, numTables):
        start = sfntsize + i * table_dir_size
        end = start + table_dir_size
        # 'bongo' is the table offset (odd historical name kept as-is).
        tag, check, bongo, dirlen = struct.unpack(OpenType.TABLE_DIR_UNPACK, data[start:end])
        table_dir[tag] = {'offset': bongo, 'length': dirlen, 'checksum': check}
    font['tableDir'] = table_dir
    return font
def get_name_records(nametable):
    """reads through the name records within name table

    Returns {'count': n, 'strOffset': off, 'namerecords': {nameID:
    {'offset', 'length'}}}, keeping only Microsoft-platform, BMP Unicode,
    US-English records -- the ones the EOT header needs.
    """
    name = {}
    # read the header
    headersize = 6
    count, strOffset = struct.unpack('>2H', nametable[2:6])
    namerecsize = struct.calcsize(OpenType.NAME_RECORD_UNPACK)
    if count * namerecsize + headersize > len(nametable):
        raise FontError, 'names exceed size of name table'
    name['count'] = count
    name['strOffset'] = strOffset
    # read through the name records
    namerecs = {}
    for i in range(0, count):
        start = headersize + i * namerecsize
        end = start + namerecsize
        platformID, encodingID, languageID, nameID, namelen, offset = struct.unpack(OpenType.NAME_RECORD_UNPACK, nametable[start:end])
        # Skip every record that is not Microsoft / BMP Unicode / EN-US.
        if platformID != OpenType.PLATFORM_ID_MICROSOFT or \
           encodingID != OpenType.ENCODING_ID_MICROSOFT_UNICODEBMP or \
           languageID != OpenType.LANG_ID_MICROSOFT_EN_US:
            continue
        namerecs[nameID] = {'offset': offset, 'length': namelen}
    name['namerecords'] = namerecs
    return name
def make_eot_name_headers(fontdata, nameTableDir):
    """extracts names from the name table and generates the names header portion of the EOT header

    For each of the four names the EOT header carries (family, style,
    version, full name) this emits: USHORT byte length, the UTF-16 string
    data byte-swapped to little-endian, and a USHORT of zero padding.
    A name missing from the font becomes a zero-length entry.
    """
    nameoffset = nameTableDir['offset']
    namelen = nameTableDir['length']
    name = get_name_records(fontdata[nameoffset : nameoffset + namelen])
    namestroffset = name['strOffset']
    namerecs = name['namerecords']
    eotnames = (OpenType.NAME_ID_FAMILY, OpenType.NAME_ID_STYLE, OpenType.NAME_ID_VERSION, OpenType.NAME_ID_FULL)
    nameheaders = []
    for nameid in eotnames:
        if nameid in namerecs:
            namerecord = namerecs[nameid]
            noffset = namerecord['offset']
            nlen = namerecord['length']
            # The string data is 16-bit units, so nlen/2 big-endian
            # USHORTs are read and re-packed little-endian below.
            nformat = '%dH' % (nlen / 2) # length is in number of bytes
            start = nameoffset + namestroffset + noffset
            end = start + nlen
            nstr = struct.unpack('>' + nformat, fontdata[start:end])
            # '<H' length prefix + swapped string + '2x' zero padding.
            nameheaders.append(struct.pack('<H' + nformat + '2x', nlen, *nstr))
        else:
            nameheaders.append(struct.pack('4x')) # len = 0, padding = 0
    return ''.join(nameheaders)
# just return a null-string (len = 0)
def make_root_string():
    """Return an empty EOT root string: a zero USHORT size and no data,
    meaning the font may be used from any domain."""
    return struct.pack('<H', 0)
def make_eot_header(fontdata):
    """given ttf font data produce an EOT header

    Collects the OS/2, head and name-table values described in the big
    comment at the top of this file and packs them, little-endian, ready
    to be prepended to the (unmodified, big-endian) font data.  Raises
    FontError if a required table is missing or malformed.
    """
    fontDataSize = len(fontdata)
    font = get_table_directory(fontdata)
    # toss out .otf fonts, t2embed library doesn't support these
    tableDir = font['tableDir']
    # check for required tables
    required = (OpenType.TABLE_HEAD, OpenType.TABLE_NAME, OpenType.TABLE_OS2)
    for table in required:
        if not (table in tableDir):
            raise FontError, 'missing required table ' + multicharval(table)
    # read name strings
    # pull out data from individual tables to construct fixed header portion
    # need to calculate eotSize before packing
    version = EOT.EOT_VERSION
    flags = 0
    charset = EOT.EOT_DEFAULT_CHARSET
    magicNumber = EOT.EOT_MAGIC_NUMBER
    # read values from OS/2 table
    os2Dir = tableDir[OpenType.TABLE_OS2]
    os2offset = os2Dir['offset']
    os2size = struct.calcsize(OpenType.OS2_UNPACK)
    if os2size > os2Dir['length']:
        raise FontError, 'OS/2 table invalid length'
    os2fields = struct.unpack(OpenType.OS2_UNPACK, fontdata[os2offset : os2offset + os2size])
    panose = []
    urange = []
    codepage = []
    # OS2_UNPACK yields, in order: weight, fsType, panose[10], unicode
    # ranges[4], fsSelection, codepage ranges[2].
    weight, fsType = os2fields[:2]
    panose[:10] = os2fields[2:12]
    urange[:4] = os2fields[12:16]
    fsSelection = os2fields[16]
    codepage[:2] = os2fields[17:19]
    italic = fsSelection & OpenType.OS2_FSSELECTION_ITALIC
    # read in values from head table
    headDir = tableDir[OpenType.TABLE_HEAD]
    headoffset = headDir['offset']
    headsize = struct.calcsize(OpenType.HEAD_UNPACK)
    if headsize > headDir['length']:
        raise FontError, 'head table invalid length'
    headfields = struct.unpack(OpenType.HEAD_UNPACK, fontdata[headoffset : headoffset + headsize])
    checkSumAdjustment = headfields[0]
    # make name headers
    nameheaders = make_eot_name_headers(fontdata, tableDir[OpenType.TABLE_NAME])
    rootstring = make_root_string()
    # calculate the total eot size
    eotSize = struct.calcsize(EOT.EOT_HEADER_PACK) + len(nameheaders) + len(rootstring) + fontDataSize
    fixed = struct.pack(EOT.EOT_HEADER_PACK,
                        *([eotSize, fontDataSize, version, flags] + panose + [charset, italic] +
                          [weight, fsType, magicNumber] + urange + codepage + [checkSumAdjustment]))
    return ''.join((fixed, nameheaders, rootstring))
def write_eot_font(eot, header, data):
    """Write the EOT *header* followed by the original font *data* to the
    file named *eot*.

    The file handle is now closed deterministically; the original left it
    open until garbage collection.  Writing the two pieces separately also
    avoids building a concatenated copy in memory.
    """
    with open(eot, 'wb') as out:
        out.write(header)
        out.write(data)
    return
def main():
    """Command-line driver: convert each font file argument to .eotlite."""
    # deal with options
    p = optparse.OptionParser()
    # NOTE(review): '--output' is parsed but never used below (output
    # names always come from eotname()); the "world" default looks like a
    # leftover placeholder -- confirm before relying on this option.
    p.add_option('--output', '-o', default="world")
    options, args = p.parse_args()
    # iterate over font files
    for f in args:
        data = readfont(f)
        if len(data) == 0:
            print 'Error reading %s' % f
        else:
            eot = eotname(f)
            header = make_eot_header(data)
            write_eot_font(eot, header, data)
# Run the converter only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| mit |
jambolo/bitcoin | test/functional/wallet_keypool_topup.py | 5 | 4373 | #!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test HD Wallet keypool restore function.
Two nodes. Node1 is under test. Node0 is providing transactions and generating blocks.
- Start node1, shutdown and backup wallet.
- Generate 110 keys (enough to drain the keypool). Store key 90 (in the initial keypool) and key 110 (beyond the initial keypool). Send funds to key 90 and key 110.
- Stop node1, clear the datadir, move wallet file back into the datadir and restart node1.
- connect node1 to node0. Verify that they sync and node1 receives its funds."""
import os
import shutil
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
)
class KeypoolRestoreTest(BitcoinTestFramework):
    """Restore a wallet backup after draining the keypool and verify the
    pool tops up and previously-derived keys still receive funds."""

    def set_test_params(self):
        # Node 0 mines and funds; nodes 1-3 each exercise one address type
        # with a 100-key keypool.
        self.setup_clean_chain = True
        self.num_nodes = 4
        self.extra_args = [[], ['-keypool=100'], ['-keypool=100'], ['-keypool=100']]

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        # NOTE(review): wallet_path always points at node 1's wallet file,
        # even when idx below is 2 or 3 -- confirm this is intended.
        wallet_path = os.path.join(self.nodes[1].datadir, self.chain, "wallets", self.default_wallet_name, self.wallet_data_filename)
        wallet_backup_path = os.path.join(self.nodes[1].datadir, "wallet.bak")
        self.nodes[0].generate(101)
        self.log.info("Make backup of wallet")
        self.stop_node(1)
        shutil.copyfile(wallet_path, wallet_backup_path)
        self.start_node(1, self.extra_args[1])
        self.connect_nodes(0, 1)
        self.connect_nodes(0, 2)
        self.connect_nodes(0, 3)
        for i, output_type in enumerate(["legacy", "p2sh-segwit", "bech32"]):
            self.log.info("Generate keys for wallet with address type: {}".format(output_type))
            idx = i+1
            # Drain the initial 100-key pool (key 90 stays inside it)...
            for _ in range(90):
                addr_oldpool = self.nodes[idx].getnewaddress(address_type=output_type)
            # ...and go 20 keys beyond it (key 110 is past the backup's pool).
            for _ in range(20):
                addr_extpool = self.nodes[idx].getnewaddress(address_type=output_type)
            # Make sure we're creating the outputs we expect
            address_details = self.nodes[idx].validateaddress(addr_extpool)
            if i == 0:
                assert not address_details["isscript"] and not address_details["iswitness"]
            elif i == 1:
                assert address_details["isscript"] and not address_details["iswitness"]
            else:
                assert not address_details["isscript"] and address_details["iswitness"]
            self.log.info("Send funds to wallet")
            self.nodes[0].sendtoaddress(addr_oldpool, 10)
            self.nodes[0].generate(1)
            self.nodes[0].sendtoaddress(addr_extpool, 5)
            self.nodes[0].generate(1)
            self.sync_blocks()
            self.log.info("Restart node with wallet backup")
            self.stop_node(idx)
            shutil.copyfile(wallet_backup_path, wallet_path)
            self.start_node(idx, self.extra_args[idx])
            self.connect_nodes(0, idx)
            self.sync_all()
            self.log.info("Verify keypool is restored and balance is correct")
            assert_equal(self.nodes[idx].getbalance(), 15)
            assert_equal(self.nodes[idx].listtransactions()[0]['category'], "receive")
            # Check that we have marked all keys up to the used keypool key as used
            if self.options.descriptors:
                if output_type == 'legacy':
                    assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/44'/1'/0'/0/110")
                elif output_type == 'p2sh-segwit':
                    assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/49'/1'/0'/0/110")
                elif output_type == 'bech32':
                    assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/84'/1'/0'/0/110")
            else:
                assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/0'/0'/110'")
# Standard functional-test entry point.
if __name__ == '__main__':
    KeypoolRestoreTest().main()
| mit |
faun/django_test | build/lib/django/contrib/localflavor/es/es_provinces.py | 436 | 1482 | # -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
# Spanish provinces keyed by their official two-digit INE code (the same
# codes used as postal-code prefixes); display names are lazily translated.
PROVINCE_CHOICES = (
    ('01', _('Arava')),
    ('02', _('Albacete')),
    ('03', _('Alacant')),
    ('04', _('Almeria')),
    ('05', _('Avila')),
    ('06', _('Badajoz')),
    ('07', _('Illes Balears')),
    ('08', _('Barcelona')),
    ('09', _('Burgos')),
    ('10', _('Caceres')),
    ('11', _('Cadiz')),
    ('12', _('Castello')),
    ('13', _('Ciudad Real')),
    ('14', _('Cordoba')),
    ('15', _('A Coruna')),
    ('16', _('Cuenca')),
    ('17', _('Girona')),
    ('18', _('Granada')),
    ('19', _('Guadalajara')),
    ('20', _('Guipuzkoa')),
    ('21', _('Huelva')),
    ('22', _('Huesca')),
    ('23', _('Jaen')),
    ('24', _('Leon')),
    ('25', _('Lleida')),
    ('26', _('La Rioja')),
    ('27', _('Lugo')),
    ('28', _('Madrid')),
    ('29', _('Malaga')),
    ('30', _('Murcia')),
    ('31', _('Navarre')),
    ('32', _('Ourense')),
    ('33', _('Asturias')),
    ('34', _('Palencia')),
    ('35', _('Las Palmas')),
    ('36', _('Pontevedra')),
    ('37', _('Salamanca')),
    ('38', _('Santa Cruz de Tenerife')),
    ('39', _('Cantabria')),
    ('40', _('Segovia')),
    ('41', _('Seville')),
    ('42', _('Soria')),
    ('43', _('Tarragona')),
    ('44', _('Teruel')),
    ('45', _('Toledo')),
    ('46', _('Valencia')),
    ('47', _('Valladolid')),
    ('48', _('Bizkaia')),
    ('49', _('Zamora')),
    ('50', _('Zaragoza')),
    ('51', _('Ceuta')),
    ('52', _('Melilla')),
)
| bsd-3-clause |
arnavd96/Cinemiezer | myvenv/lib/python3.4/site-packages/docutils/languages/fr.py | 52 | 1825 | # $Id: fr.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Stefane Fermigier <sf@fermigier.com>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
French-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
# French text for the labels Docutils emits for document parts/admonitions.
labels = {
      'author': 'Auteur',
      'authors': 'Auteurs',
      'organization': 'Organisation',
      'address': 'Adresse',
      'contact': 'Contact',
      'version': 'Version',
      'revision': 'R\u00e9vision',
      'status': 'Statut',
      'date': 'Date',
      'copyright': 'Copyright',
      'dedication': 'D\u00e9dicace',
      'abstract': 'R\u00e9sum\u00e9',
      'attention': 'Attention!',
      'caution': 'Avertissement!',
      'danger': '!DANGER!',
      'error': 'Erreur',
      'hint': 'Indication',
      'important': 'Important',
      'note': 'Note',
      'tip': 'Astuce',
      'warning': 'Avis',
      'contents': 'Sommaire'}
"""Mapping of node class name to label text."""

# Inverse direction: recognizes French field names in a document's
# bibliographic field list and maps them to the canonical English names.
bibliographic_fields = {
      'auteur': 'author',
      'auteurs': 'authors',
      'organisation': 'organization',
      'adresse': 'address',
      'contact': 'contact',
      'version': 'version',
      'r\u00e9vision': 'revision',
      'statut': 'status',
      'date': 'date',
      'copyright': 'copyright',
      'd\u00e9dicace': 'dedication',
      'r\u00e9sum\u00e9': 'abstract'}
"""French (lowcased) to canonical name mapping for bibliographic fields."""

author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
| mit |
urinieto/SegmenterMIREX2014 | pymf/chnmf.py | 1 | 7770 | #!/usr/bin/python
#
# Copyright (C) Christian Thurau, 2010.
# Licensed under the GNU General Public License (GPL).
# http://www.gnu.org/licenses/gpl.txt
"""
PyMF Convex Hull Non-negative Matrix Factorization [1]
CHNMF(NMF) : Class for Convex-hull NMF
quickhull : Function for finding the convex hull in 2D
[1] C. Thurau, K. Kersting, and C. Bauckhage. Convex Non-Negative Matrix
Factorization in the Wild. ICDM 2009.
"""
import numpy as np
from itertools import combinations
from dist import vq
from pca import PCA
from aa import AA
__all__ = ["CHNMF"]
def quickhull(sample):
    """ Find data points on the convex hull of a supplied data set

    Classic divide-and-conquer quickhull: take the two extreme points
    along the first axis as a baseline, then recursively keep the point
    farthest outside each directed edge.

    Args:
        sample: data points as column vectors n x d
                    n - number samples
                    d - data dimension (should be two)

    Returns:
        a k x d matrix containing the convex hull data points
    """
    link = lambda a, b: np.concatenate((a, b[1:]))
    edge = lambda a, b: np.concatenate(([a], [b]))

    def dome(sample, base):
        # Hull points lying to the left of the directed edge base = (h, t).
        h, t = base
        # Signed distance of every sample from the edge: the (0,-1),(1,0)
        # matrix rotates (t - h) by 90 degrees to form an edge normal.
        dists = np.dot(sample - h, np.dot(((0, -1), (1, 0)), (t - h)))
        # Boolean repeats act as 0/1 counts, keeping only points strictly
        # outside the edge.
        outer = np.repeat(sample, dists > 0, axis=0)

        if len(outer):
            pivot = sample[np.argmax(dists)]
            return link(dome(outer, edge(h, pivot)),
                        dome(outer, edge(pivot, t)))
        else:
            return base

    if len(sample) > 2:
        axis = sample[:, 0]
        # Leftmost and rightmost points are guaranteed hull vertices; use
        # them as the initial baseline and process both half-planes.
        base = np.take(sample, [np.argmin(axis), np.argmax(axis)], axis=0)
        return link(dome(sample, base),
                    dome(sample, base[::-1]))
    else:
        # 0, 1 or 2 points are trivially their own hull.
        return sample
class CHNMF(AA):
    """
    CHNMF(data, num_bases=4)

    Convex Hull Non-negative Matrix Factorization. Factorize a data matrix into
    two matrices s.t. F = | data - W*H | is minimal. H is restricted to convexity
    (H >=0, sum(H, axis=1) = [1 .. 1]) and W resides on actual data points.
    Factorization is solved via an alternating least squares optimization using
    the quadratic programming solver from cvxopt. The results are usually
    equivalent to Archetypal Analysis (pymf.AA) but CHNMF also works for very
    large datasets.

    Parameters
    ----------
    data : array_like, shape (_data_dimension, _num_samples)
        the input data
    num_bases: int, optional
        Number of bases to compute (column rank of W and row rank of H).
        4 (default)
    base_sel: int,
        Number of pairwise basis vector projections. Set to a value < rank(data).
        Computation time scales exponentially with this value, usually rather low
        values are sufficient (3-10).

    Attributes
    ----------
    W : "data_dimension x num_bases" matrix of basis vectors
    H : "num bases x num_samples" matrix of coefficients
    ferr : frobenius norm (after calling .factorize())

    Example
    -------
    Applying CHNMF to some rather stupid data set:

    >>> import numpy as np
    >>> from chnmf import CHNMF
    >>> data = np.array([[1.0, 0.0, 2.0], [0.0, 1.0, 1.0]])

    Use 2 basis vectors -> W shape(data_dimension, 2).

    >>> chnmf_mdl = CHNMF(data, num_bases=2)

    And start computing the factorization.

    >>> chnmf_mdl.factorize()

    The basis vectors are now stored in chnmf_mdl.W, the coefficients in
    chnmf_mdl.H. To compute coefficients for an existing set of basis vectors
    simply copy W to chnmf_mdl.W, and set compute_w to False:

    >>> data = np.array([[1.5, 2.0], [1.2, 1.8]])
    >>> W = np.array([[1.0, 0.0], [0.0, 1.0]])
    >>> chnmf_mdl = CHNMF(data, num_bases=2)
    >>> chnmf_mdl.W = W
    >>> chnmf_mdl.factorize(compute_w=False)

    The result is a set of coefficients chnmf_mdl.H, s.t. data = W * chnmf_mdl.H.
    """

    def __init__(self, data, num_bases=4, base_sel=3):
        # call inherited method
        AA.__init__(self, data, num_bases=num_bases)
        # base sel should never be larger than the actual data dimension
        self._base_sel = base_sel
        if base_sel > self.data.shape[0]:
            self._base_sel = self.data.shape[0]

    def init_h(self):
        # Coefficients start at zero; AA.factorize fills them in.
        self.H = np.zeros((self._num_bases, self._num_samples))

    def init_w(self):
        # Basis vectors start at zero; update_w replaces them.
        self.W = np.zeros((self._data_dimension, self._num_bases))

    def _map_w_to_data(self):
        """ Return data points that are most similar to basis vectors W
        """
        # assign W to the next best data sample
        self._Wmapped_index = vq(self.data, self.W)
        self.Wmapped = np.zeros(self.W.shape)
        # do not directly assign, i.e. Wdist = self.data[:,sel]
        # as self might be unsorted (in non ascending order)
        # -> sorting sel would screw the matching to W if
        # self.data is stored as a hdf5 table (see h5py)
        for i, s in enumerate(self._Wmapped_index):
            self.Wmapped[:,i] = self.data[:,s]

    def update_w(self):
        """ compute new W """
        def select_hull_points(data, n=3):
            """ select data points for pairwise projections of the first n
            dimensions """
            # iterate over all projections and select data points
            idx = np.array([])
            # iterate over some pairwise combinations of dimensions
            for i in combinations(range(n), 2):
                # sample convex hull points in 2D projection
                convex_hull_d = quickhull(data[i, :].T)
                # get indices for convex hull data points
                idx = np.append(idx, vq(data[i, :], convex_hull_d.T))
                idx = np.unique(idx)
            return np.int32(idx)

        # determine convex hull data points using either PCA or random
        # projections
        method = 'randomprojection'
        if method == 'pca':
            # NOTE(review): this branch is unreachable with the hard-coded
            # 'randomprojection' above; kept for experimentation.
            pcamodel = PCA(self.data)
            pcamodel.factorize(show_progress=False)
            proj = pcamodel.H
        else:
            R = np.random.randn(self._base_sel, self._data_dimension)
            proj = np.dot(R, self.data)

        self._hull_idx = select_hull_points(proj, n=self._base_sel)
        aa_mdl = AA(self.data[:, self._hull_idx], num_bases=self._num_bases)

        # determine W
        aa_mdl.factorize(niter=50, compute_h=True, compute_w=True,
                         compute_err=True, show_progress=False)
        self.W = aa_mdl.W
        self._map_w_to_data()

    def factorize(self, show_progress=False, compute_w=True, compute_h=True,
                  compute_err=True, niter=1):
        """ Factorize s.t. WH = data

        Parameters
        ----------
        show_progress : bool
            print some extra information to stdout.
        compute_h : bool
            iteratively update values for H.
        compute_w : bool
            iteratively update values for W.
        compute_err : bool
            compute Frobenius norm |data-WH| after each update and store
            it to .ferr[k].

        Updated Values
        --------------
        .W : updated values for W.
        .H : updated values for H.
        .ferr : Frobenius norm |data-WH|.
        """
        # NOTE(review): the niter argument is accepted but ignored --
        # AA.factorize is always called with niter=1; confirm intended.
        AA.factorize(self, niter=1, show_progress=show_progress,
                     compute_w=compute_w, compute_h=compute_h,
                     compute_err=compute_err)
if __name__ == "__main__":
import doctest
doctest.testmod()
| bsd-3-clause |
tempbottle/cjdns | node_build/dependencies/libuv/build/gyp/pylib/gyp/MSVSUserFile.py | 2710 | 5094 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
class Writer(object):
  """Visual Studio XML user file (.vcproj.user) writer."""

  def __init__(self, user_file_path, version, name):
    """Initializes the user file.

    Args:
      user_file_path: Path to the user file.
      version: Version info.
      name: Name of the user file.
    """
    self.user_file_path = user_file_path
    self.version = version
    self.name = name
    # Maps configuration name -> easy_xml element list for that config.
    self.configurations = {}

  def AddConfig(self, name):
    """Adds a configuration to the project.

    Args:
      name: Configuration name.
    """
    self.configurations[name] = ['Configuration', {'Name': name}]

  # NOTE(review): environment={} is a mutable default argument; it appears
  # to only be read here, but a None default would be safer.
  def AddDebugSettings(self, config_name, command, environment = {},
                       working_directory=""):
    """Adds a DebugSettings node to the user file for a particular config.

    Args:
      command: command line to run.  First element in the list is the
        executable.  All elements of the command will be quoted if
        necessary.
      working_directory: other files which may trigger the rule. (optional)
    """
    command = _QuoteWin32CommandLineArgs(command)
    abs_command = _FindCommandInPath(command[0])
    # Flatten an environment dict into the 'KEY="value" ...' string form
    # MSVS expects; anything else becomes an empty environment.
    if environment and isinstance(environment, dict):
      env_list = ['%s="%s"' % (key, val)
                  for (key,val) in environment.iteritems()]
      environment = ' '.join(env_list)
    else:
      environment = ''
    n_cmd = ['DebugSettings',
             {'Command': abs_command,
              'WorkingDirectory': working_directory,
              'CommandArguments': " ".join(command[1:]),
              'RemoteMachine': socket.gethostname(),
              'Environment': environment,
              'EnvironmentMerge': 'true',
              # Currently these are all "dummy" values that we're just setting
              # in the default manner that MSVS does it. We could use some of
              # these to add additional capabilities, I suppose, but they might
              # not have parity with other platforms then.
              'Attach': 'false',
              'DebuggerType': '3', # 'auto' debugger
              'Remote': '1',
              'RemoteCommand': '',
              'HttpUrl': '',
              'PDBPath': '',
              'SQLDebugging': '',
              'DebuggerFlavor': '0',
              'MPIRunCommand': '',
              'MPIRunArguments': '',
              'MPIRunWorkingDirectory': '',
              'ApplicationCommand': '',
              'ApplicationArguments': '',
              'ShimCommand': '',
              'MPIAcceptMode': '',
              'MPIAcceptFilter': ''
             }]
    # Find the config, and add it if it doesn't exist.
    if config_name not in self.configurations:
      self.AddConfig(config_name)
    # Add the DebugSettings onto the appropriate config.
    self.configurations[config_name].append(n_cmd)

  def WriteIfChanged(self):
    """Writes the user file."""
    configs = ['Configurations']
    # Sorted for deterministic output; only the spec (value) is emitted.
    for config, spec in sorted(self.configurations.iteritems()):
      configs.append(spec)
    content = ['VisualStudioUserFile',
               {'Version': self.version.ProjectVersion(),
                'Name': self.name
               },
              configs]
    # Avoids touching the file (and triggering rebuilds) when unchanged.
    easy_xml.WriteXmlIfChanged(content, self.user_file_path,
                               encoding="Windows-1252")
| gpl-3.0 |
hcleon/zxingExtend | cpp/scons/scons-local-2.0.0.final.0/SCons/Tool/ar.py | 34 | 2198 | """SCons.Tool.ar
Tool-specific initialization for ar (library archive).
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/ar.py 5023 2010/06/14 22:05:46 scons"
import SCons.Defaults
import SCons.Tool
import SCons.Util
def generate(env):
    """Add Builders and construction variables for ar to an Environment."""
    SCons.Tool.createStaticLibBuilder(env)

    # Default archiver settings, applied in one pass.
    defaults = [
        ('AR', 'ar'),
        ('ARFLAGS', SCons.Util.CLVar('rc')),
        ('ARCOM', '$AR $ARFLAGS $TARGET $SOURCES'),
        ('LIBPREFIX', 'lib'),
        ('LIBSUFFIX', '.a'),
    ]
    for key, value in defaults:
        env[key] = value

    # Only configure ranlib if it is actually present on this system.
    if env.Detect('ranlib'):
        env['RANLIB'] = 'ranlib'
        env['RANLIBFLAGS'] = SCons.Util.CLVar('')
        env['RANLIBCOM'] = '$RANLIB $RANLIBFLAGS $TARGET'
def exists(env):
    """Report whether the 'ar' archiver can be detected by this Environment."""
    found = env.Detect('ar')
    return found
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
SuperNascher/mumble | scripts/generate-cipherinfo.py | 3 | 9129 | #!/usr/bin/env python
#
# Copyright 2005-2016 The Mumble Developers. All rights reserved.
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file at the root of the
# Mumble source tree or at <https://www.mumble.info/LICENSE>.
#
#
# cipherinfo.py
# Generate static TLS cipher information for Mumble.
from __future__ import (unicode_literals, print_function, division)
import json
import re
import subprocess
try:
from urllib2 import urlopen
except:
from urllib.request import urlopen
from xml.dom import minidom
# IANA "Transport Layer Security (TLS) Parameters" registry; used by
# rfcNameLut() to map cipher suite code points to their RFC names.
IETF_TLS_PARAMETERS_WWW = "https://www.ietf.org/assignments/tls-parameters/tls-parameters.xml"
def rfcNameLut():
    """Build a mapping from cipher suite value strings (e.g. "0x00,0x2F")
    to RFC cipher suite names.

    Downloads the IANA TLS parameters registry and augments it with the
    obsolete SSLv2 suites from RFC 4346, Appendix E.
    """
    lut = {}

    # Auto-generate from IETF_TLS_PARAMETERS_WWW
    handle = urlopen(IETF_TLS_PARAMETERS_WWW)
    raw = handle.read().decode('utf-8')
    handle.close()

    dom = minidom.parseString(raw)
    for registry in dom.getElementsByTagName("registry"):
        # Only the "TLS Cipher Suites" sub-registry is of interest.
        if registry.getAttribute("id") != "tls-parameters-4":
            continue
        for record in registry.getElementsByTagName("record"):
            value = record.getElementsByTagName("value")[0].childNodes[0].nodeValue.strip()
            description = record.getElementsByTagName("description")[0].childNodes[0].nodeValue.strip()
            # Skip free-form informational entries that use ranges (-) and *
            # in their value.
            if re.match("^[0xA-Z1-9,]*$", value) is None:
                continue
            lut[value] = description

    ##########################################################################
    # Obsolete SSLv2 cipher suites from RFC 4346, Appendix E:
    ##########################################################################
    lut["0x01,0x00,0x80"] = "TLS_RC4_128_WITH_MD5"
    lut["0x02,0x00,0x80"] = "TLS_RC4_128_EXPORT40_WITH_MD5"
    lut["0x03,0x00,0x80"] = "TLS_RC2_CBC_128_CBC_WITH_MD5"
    lut["0x04,0x00,0x80"] = "TLS_RC2_CBC_128_CBC_EXPORT40_WITH_MD5"
    lut["0x05,0x00,0x80"] = "TLS_IDEA_128_CBC_WITH_MD5"
    lut["0x06,0x00,0x40"] = "TLS_DES_64_CBC_WITH_MD5"
    lut["0x07,0x00,0xC0"] = "TLS_DES_192_EDE3_CBC_WITH_MD5"

    return lut
def opensslCiphersOutput():
    """Run `openssl ciphers -V ALL` and return its stdout decoded as UTF-8.

    Raises Exception if the openssl invocation fails.
    """
    proc = subprocess.Popen(['openssl', 'ciphers', '-V', 'ALL'],
                            stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    stdout = stdout.decode('utf-8') if stdout is not None else stdout
    stderr = stderr.decode('utf-8') if stderr is not None else stderr
    if proc.returncode != 0:
        raise Exception('"openssl ciphers" failed: %s', stderr)
    return stdout
def extract(splat):
    """Pad a parsed `openssl ciphers -V` row out to 8 columns.

    The trailing "export" column is absent for non-export ciphers, so short
    rows must be padded with empty strings before being unpacked into the
    8 fields at the call site.  The list is modified in place and returned.
    """
    # Generalized from a single append (which only handled the 7-column
    # case): pad however many columns are missing.
    while len(splat) < 8:
        splat.append('')
    return splat
def Cstr(val):
    """Render a value as a C string literal token; None becomes NULL."""
    return 'NULL' if val is None else '"{0}"'.format(val)
def CPPbool(val):
    """Render a value as a C++ bool literal.

    Only the actual singleton True maps to 'true'; everything else
    (including truthy values such as 1) maps to 'false'.
    """
    return 'true' if val is True else 'false'
def main():
    """Join `openssl ciphers -V` output with the IANA TLS registry and emit
    cipher-suite metadata.

    Prints either a C lookup table (when output_c is True) or JSON.
    Raises Exception on any suite that cannot be normalized.
    """
    added_ids = []
    output = []

    lut = rfcNameLut()
    ciphers = opensslCiphersOutput()
    for line in ciphers.split('\n'):
        if len(line) == 0:
            continue
        # Flatten the openssl row into tab-separated columns by stripping
        # the Kx=/Au=/Enc=/Mac= prefixes and normalizing punctuation.
        line = line.replace(' - ', ' ')
        line = line.replace('Kx=', '')
        line = line.replace('Au=', '')
        line = line.replace('Enc=', '')
        line = line.replace('Mac=', '')
        line = line.replace('(', '_')
        line = line.replace(')', '')
        line = line.replace('/', '_')
        tabline = re.sub('\ +', '', line, 1)
        tabline = re.sub('\ +', '\t', tabline)
        splat = tabline.split('\t')
        ident, osslname, minproto, kx, au, enc, mac, exp = extract(splat)

        # Look up the RFC name of this cipher suite.
        if ident in lut:
            rfcname = lut[ident]
        else:
            raise Exception('missing rfc_name in lut for %s' % ident)

        # Normalize kx, au, enc and mac.
        if 'None' in au:
            au = 'Anonymous'
        enc = enc.upper()
        if 'AESGCM' in enc:
            enc = enc.replace('GCM', '')
            enc = enc + '_GCM'
        if 'GCM' in rfcname and not 'GCM' in enc:
            enc = enc + '_GCM'
        elif 'EDE_CBC' in rfcname:
            enc = enc + '_EDE_CBC'
        elif 'CBC' in rfcname:
            enc = enc + '_CBC'
        elif 'CCM_8' in rfcname:
            enc = enc + '_CCM_8'
        elif 'CCM' in rfcname:
            enc = enc + '_CCM'
        if 'ECDHE' in osslname and not 'ECDHE' in kx:
            kx = kx.replace('ECDH', 'ECDHE')
        if 'ECDHE' in osslname and not 'ECDHE' in au:
            au = au.replace('ECDH', 'ECDHE')
        if 'DHE' in osslname and not 'DHE' in kx:
            kx = kx.replace('DH', 'DHE')
        if 'DHE' in osslname and not 'DHE' in au:
            au = au.replace('DH', 'DHE')
        if 'EDH' in osslname and not 'EDH' in kx:
            kx = kx.replace('DH', 'DHE')
        if 'EDH' in osslname and not 'EDH' in au:
            au = au.replace('DH', 'DHE')
        if mac != 'AEAD':
            mac = 'HMAC-' + mac

        # Validate macs
        valid_macs = ['AEAD', 'HMAC-MD5', 'HMAC-SHA1', 'HMAC-SHA256',
                      'HMAC-SHA384']
        if mac not in valid_macs:
            raise Exception('invalid mac found: %s' % mac)

        # Use key exchange names from the RFCs, but also create
        # verbose key exchange names for export ciphers.
        match = re.match('^(TLS_|SSL_)(.*)_WITH.*$', rfcname)
        valid_rfc_kex = [
            "ECDHE_RSA",
            "ECDHE_ECDSA",
            "SRP_SHA_DSS",
            "SRP_SHA_RSA",
            "SRP_SHA",
            "DHE_DSS",
            "DHE_RSA",
            "ECDH_anon",
            "DH_anon",
            "ECDH_RSA",
            "ECDH_ECDSA",
            "RSA",
            "PSK",
        ]
        # BUG FIX: "DHE_DSS_EXPORT" was listed twice; the duplicate entry
        # had no effect and has been removed.
        valid_export_rfc_kex = [
            "DHE_RSA_EXPORT",
            "DHE_DSS_EXPORT",
            "DH_anon_EXPORT",
            "RSA_EXPORT",
        ]
        skip_rfc_kex = [
            "IDEA_128_CBC",
            "RC2_CBC_128_CBC",
            "RC4_128",
            "DES_192_EDE3_CBC",
            "DES_64_CBC",
            "IDEA_128_CBC",
            "RC2_CBC_128_CBC_EXPORT40",
            "RC4_128_EXPORT40"
        ]
        if match is not None:
            rfc_kex = match.groups()[1]
            rfc_verbose_kex = rfc_kex
            if rfc_kex in skip_rfc_kex:
                # SSLv2-style names carry no kex info; fall back to openssl's.
                rfc_kex = kx
                rfc_verbose_kex = kx
            elif rfc_kex in valid_rfc_kex:
                pass
            elif rfc_kex in valid_export_rfc_kex:
                if rfc_kex == 'DHE_RSA_EXPORT':
                    rfc_verbose_kex = 'DHE_512_RSA_EXPORT'
                elif rfc_kex == 'DHE_DSS_EXPORT':
                    rfc_verbose_kex = 'DHE_512_DSS_EXPORT'
                elif rfc_kex == 'DH_anon_EXPORT':
                    rfc_verbose_kex = 'DH_anon_512_EXPORT'
                elif rfc_kex == 'RSA_EXPORT':
                    rfc_verbose_kex = 'RSA_512_EXPORT'
                else:
                    raise Exception('missing check for rfc_kex?')
            else:
                raise Exception('bad rfc_kex found: %s' % rfc_kex)

            pfs = False
            if rfc_verbose_kex == 'ECDHE_RSA':
                pfs = True
            elif rfc_verbose_kex == 'ECDHE_ECDSA':
                pfs = True
            elif rfc_verbose_kex == 'DHE_RSA':
                pfs = True
            elif rfc_verbose_kex == 'DHE_DSS':
                pfs = True
            elif rfc_verbose_kex == 'DHE_512_RSA_EXPORT':
                pfs = True
            elif rfc_verbose_kex == 'DHE_512_DSS_EXPORT':
                pfs = True
            # XXX: should SRP be marked as forward_secret?

            # BUG FIX: the dict literal used to contain the 'key_exchange'
            # key twice (both set to rfc_kex); the duplicate is removed.
            output.append({
                'identifier': ident,
                'openssl_name': osslname,
                'rfc_name': rfcname,
                'minimum_protocol': minproto,
                'key_exchange': rfc_kex,
                'key_exchange_verbose': rfc_verbose_kex,
                'openssl_key_exchange': kx,
                'openssl_authentication': au,
                'encryption': enc,
                'message_authentication': mac,
                'export': exp == 'export',
                'forward_secret': pfs
            })
            added_ids.append(ident)

    # Add everything we missed from OpenSSL...
    include_extras = False
    if include_extras:
        for key in lut.keys():
            if not key in added_ids:
                # (same duplicate-key fix applied to this literal as well)
                output.append({
                    'identifier': key,
                    'openssl_name': None,
                    'rfc_name': lut[key],
                    'minimum_protocol': None,
                    'key_exchange': None,
                    'key_exchange_verbose': None,
                    'openssl_key_exchange': None,
                    'openssl_authentication': None,
                    'encryption': None,
                    'message_authentication': None,
                    'export': None,
                    'forward_secret': None
                })

    output_c = True
    if output_c:
        print('// Automatically generated by "cipherinfo.py". DO NOT EDIT BY HAND.')
        print('//')
        print('// I also agree to have manually vetted this file for correctness.')
        print('//')
        print('// If I do not agree, I will not have removed the line above saying')
        print('// otherwise. Nor will I have removed the line below this one which')
        print('// will cause a preprocessor error. Oops!')
        print('#error Please verify this file is correct')
        print('static const SSLCipherInfo cipher_info_lookup_table[] = {')
        for entry in output:
            print('\t{')
            print('\t\t// openssl_name')
            print('\t\t{0},'.format(Cstr(entry["openssl_name"])))
            print('\t\t// rfc_name')
            print('\t\t{0},'.format(Cstr(entry["rfc_name"])))
            print('\t\t// encryption')
            print('\t\t{0},'.format(Cstr(entry["encryption"])))
            print('\t\t// key_exchange_verbose. kx = {0}, au = {1}'.format(entry["openssl_key_exchange"], entry["openssl_authentication"]))
            print('\t\t{0},'.format(Cstr(entry["key_exchange_verbose"])))
            print('\t\t// forward secret')
            print('\t\t{0},'.format(CPPbool(entry["forward_secret"])))
            print('\t\t// message authentication')
            print('\t\t{0},'.format(Cstr(entry["message_authentication"])))
            print('\t},')
        print('};')
    else:
        print(json.dumps(output, sort_keys=True, indent=4, separators=(',', ': ')))


if __name__ == '__main__':
    main()
| bsd-3-clause |
kevinchen3315/gyp-git | test/dependencies/gyptest-lib-only.py | 161 | 1192 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verify that a link time only dependency will get pulled into the set of built
targets, even if no executable uses it.
"""
import TestGyp
import sys
# Generate and build everything in lib_only.gyp, then assert on which
# libraries actually got produced.
test = TestGyp.TestGyp()

test.run_gyp('lib_only.gyp')

test.build('lib_only.gyp', test.ALL)

# The 'a' static library is a direct target and must always be built.
test.built_file_must_exist('a', type=test.STATIC_LIB)

# TODO(bradnelson/mark):
# On linux and windows a library target will at least pull its link dependencies
# into the generated project, since not doing so confuses users.
# This is not currently implemented on mac, which has the opposite behavior.
if sys.platform == 'darwin':
  if test.format == 'xcode':
    test.built_file_must_not_exist('b', type=test.STATIC_LIB)
  else:
    assert test.format in ('make', 'ninja')
    test.built_file_must_exist('b', type=test.STATIC_LIB)
else:
  # Make puts the resulting library in a directory matching the input gyp file;
  # for the 'b' library, that is in the 'b' subdirectory.
  test.built_file_must_exist('b', type=test.STATIC_LIB, subdir='b')

test.pass_test()
| bsd-3-clause |
bgammill/namebench | libnamebench/sys_nameservers.py | 173 | 3811 | # Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods to get information about system DNS servers."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import glob
import os
import subprocess
import sys
import time
if __name__ == '__main__':
sys.path.append('../nb_third_party')
# 3rd party libraries
import dns.resolver
# local libs
import addr_util
# Ignore DHCP lease files older than one day; they likely describe a network
# the host is no longer attached to.
MAX_LEASE_AGE = 24 * 3600
# Lease files smaller than this are assumed to be empty or truncated.
MIN_LEASE_FILE_SIZE = 1024
def GetAllSystemNameServers():
servers = list(set(GetCurrentNameServers() + GetAssignedNameServers()))
print servers
return servers
def GetCurrentNameServers():
"""Return list of DNS server IP's used by the host via dnspython"""
try:
servers = dns.resolver.Resolver().nameservers
except:
print "Unable to get list of internal DNS servers."
servers = []
# dnspython does not always get things right on Windows, particularly in
# versions with right-to-left languages. Fall back to ipconfig /all
if not servers and sys.platform[:3] == 'win':
return _GetNameServersFromWinIpConfig()
return servers
def GetAssignedNameServers():
  """Servers assigned by DHCP."""
  if sys.platform != 'darwin':
    return _GetNameServersFromDhclient()
  return _GetNameServersFromMacIpConfig()
def _GetNameServersFromMacIpConfig():
  """Return DNS servers handed out by DHCP on OS X, via ipconfig(8)."""
  servers = []
  ifcount = subprocess.Popen(['ipconfig', 'ifcount'],
                             stdout=subprocess.PIPE).stdout.read()
  # BUG FIX: the old expression "en%s" % (int(x)-1) for x in range(n)
  # produced en-1 .. en(n-2): it queried a bogus interface ("en-1") and
  # skipped the last real one.  Interfaces are numbered en0 .. en(n-1).
  interfaces = ['en%d' % i for i in range(int(ifcount))]
  for iface in interfaces:
    output = subprocess.Popen(['ipconfig', 'getpacket', iface],
                              stdout=subprocess.PIPE).stdout.read()
    for line in output.split('\n'):
      if 'domain_name_server' in line:
        # print "%s domain_name_server: %s" % (iface, line)
        servers.extend(addr_util.ExtractIPsFromString(line))
  return servers
def _GetNameServersFromWinIpConfig():
"""Return a list of DNS servers via ipconfig (Windows only)"""
servers = []
output = subprocess.Popen(['ipconfig', '/all'], stdout=subprocess.PIPE).stdout.read()
for line in output.split('\r\n'):
if 'DNS Servers' in line:
print "ipconfig: %s" % line
servers.extend(addr_util.ExtractIPsFromString(line))
return servers
def _GetNameServersFromDhclient():
  """Parse the newest dhclient lease file for DHCP-assigned DNS servers."""
  path = _FindNewestDhclientLeaseFile()
  if not path:
    return []
  # BUG FIX: ns_string was previously unbound when no line matched, which
  # raised UnboundLocalError instead of returning [].
  ns_string = None
  # We want the last matching line in the file
  for line in open(path):
    if 'option domain-name-servers' in line:
      ns_string = line
  if ns_string:
    return addr_util.ExtractIPsFromString(ns_string)
  else:
    return []
def _FindNewestDhclientLeaseFile():
  """Return the most recently modified usable dhclient lease file, or None.

  A file is usable if it is big enough, fresh enough, and readable.
  """
  paths = [
    '/var/lib/dhcp3/dhclient.*leases'
  ]
  found = []
  for path in paths:
    for filename in glob.glob(path):
      if os.path.getsize(filename) < MIN_LEASE_FILE_SIZE:
        continue
      elif time.time() - os.path.getmtime(filename) > MAX_LEASE_AGE:
        continue
      else:
        try:
          fp = open(filename, 'rb')
          fp.close()
          found.append(filename)
        except (IOError, OSError):
          # BUG FIX: was a bare "except:"; only skip unreadable files,
          # don't swallow unrelated errors (KeyboardInterrupt, etc.).
          continue

  if found:
    return sorted(found, key=os.path.getmtime)[-1]
  else:
    return None
if __name__ == '__main__':
  # Manual smoke test: print the results of each discovery mechanism.
  print "Current: %s" % GetCurrentNameServers()
  print "Assigned: %s" % GetAssignedNameServers()
  print "System: %s" % GetAllSystemNameServers()
| apache-2.0 |
thinkerou/grpc | tools/run_tests/run_microbenchmark.py | 11 | 9100 | #!/usr/bin/env python
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cgi
import multiprocessing
import os
import subprocess
import sys
import argparse
import python_utils.jobset as jobset
import python_utils.start_port_server as start_port_server
sys.path.append(
os.path.join(
os.path.dirname(sys.argv[0]), '..', 'profiling', 'microbenchmarks',
'bm_diff'))
import bm_constants
# Location of Brendan Gregg's FlameGraph scripts (used by the perf collector).
flamegraph_dir = os.path.join(os.path.expanduser('~'), 'FlameGraph')

# All make/bins paths below assume the gRPC repo root as the working directory.
os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
if not os.path.exists('reports'):
    os.makedirs('reports')

# Benchmarks need the shared port server running before they start.
start_port_server.start_port_server()
def fnize(s):
    """Turn a benchmark name into a filesystem-safe name.

    Characters in '<>, /' become '_', and runs of such separators are
    collapsed into a single underscore.
    """
    pieces = []
    for ch in s:
        if ch in '<>, /':
            # Collapse consecutive separators into one underscore.
            if pieces and pieces[-1] == '_':
                continue
            pieces.append('_')
        else:
            pieces.append(ch)
    return ''.join(pieces)
# HTML prologue of the report index page; the collectors below append
# sections to it and the epilogue is appended in the top-level "finally".
index_html = """
<html>
<head>
<title>Microbenchmark Results</title>
</head>
<body>
"""
def heading(name):
    """Append a top-level section heading to the report index page."""
    global index_html
    index_html = index_html + ('<h1>%s</h1>\n' % name)
def link(txt, tgt):
    """Append an HTML-escaped hyperlink paragraph to the report index page."""
    global index_html
    escaped_target = cgi.escape(tgt, quote=True)
    escaped_text = cgi.escape(txt)
    index_html += '<p><a href="%s">%s</a></p>\n' % (escaped_target, escaped_text)
def text(txt):
    """Append an HTML-escaped preformatted block to the report index page."""
    global index_html
    index_html = index_html + ('<p><pre>%s</pre></p>\n' % cgi.escape(txt))
def collect_latency(bm_name, args):
    """generate latency profiles

    Builds `bm_name` with CONFIG=basicprof, runs every microbenchmark in it
    with latency tracing enabled, and converts each trace into a text report
    under reports/.  Jobs are flushed in batches to bound memory use.
    """
    benchmarks = []
    profile_analysis = []
    cleanup = []

    heading('Latency Profiles: %s' % bm_name)
    subprocess.check_call([
        'make', bm_name, 'CONFIG=basicprof', '-j',
        '%d' % multiprocessing.cpu_count()
    ])
    for line in subprocess.check_output(
        ['bins/basicprof/%s' % bm_name, '--benchmark_list_tests']).splitlines():
        link(line, '%s.txt' % fnize(line))
        benchmarks.append(
            jobset.JobSpec(
                [
                    'bins/basicprof/%s' % bm_name,
                    '--benchmark_filter=^%s$' % line,
                    '--benchmark_min_time=0.05'
                ],
                environ={'LATENCY_TRACE': '%s.trace' % fnize(line)},
                shortname='profile-%s' % fnize(line)))
        profile_analysis.append(
            jobset.JobSpec(
                [
                    sys.executable,
                    'tools/profiling/latency_profile/profile_analyzer.py',
                    '--source',
                    '%s.trace' % fnize(line), '--fmt', 'simple', '--out',
                    'reports/%s.txt' % fnize(line)
                ],
                timeout_seconds=20 * 60,
                shortname='analyze-%s' % fnize(line)))
        cleanup.append(jobset.JobSpec(['rm', '%s.trace' % fnize(line)]))
        # periodically flush out the list of jobs: profile_analysis jobs at least
        # consume upwards of five gigabytes of ram in some cases, and so analysing
        # hundreds of them at once is impractical -- but we want at least some
        # concurrency or the work takes too long
        if len(benchmarks) >= min(16, multiprocessing.cpu_count()):
            # run up to half the cpu count: each benchmark can use up to two cores
            # (one for the microbenchmark, one for the data flush)
            jobset.run(
                benchmarks, maxjobs=max(1,
                                        multiprocessing.cpu_count() / 2))
            jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
            jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
            benchmarks = []
            profile_analysis = []
            cleanup = []
    # run the remaining benchmarks that weren't flushed
    if len(benchmarks):
        jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count() / 2))
        jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
        jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
def collect_perf(bm_name, args):
    """generate flamegraphs

    Builds `bm_name` with CONFIG=mutrace, runs every microbenchmark under
    `perf record`, and post-processes each perf data file into an SVG
    flamegraph in reports/.  Jobs are flushed in batches because the
    intermediate perf data needs a lot of temporary disk space.
    """
    heading('Flamegraphs: %s' % bm_name)
    subprocess.check_call([
        'make', bm_name, 'CONFIG=mutrace', '-j',
        '%d' % multiprocessing.cpu_count()
    ])
    benchmarks = []
    profile_analysis = []
    cleanup = []
    for line in subprocess.check_output(
        ['bins/mutrace/%s' % bm_name, '--benchmark_list_tests']).splitlines():
        link(line, '%s.svg' % fnize(line))
        benchmarks.append(
            jobset.JobSpec(
                [
                    'perf', 'record', '-o',
                    '%s-perf.data' % fnize(line), '-g', '-F', '997',
                    'bins/mutrace/%s' % bm_name,
                    '--benchmark_filter=^%s$' % line, '--benchmark_min_time=10'
                ],
                shortname='perf-%s' % fnize(line)))
        profile_analysis.append(
            jobset.JobSpec(
                [
                    'tools/run_tests/performance/process_local_perf_flamegraphs.sh'
                ],
                environ={
                    'PERF_BASE_NAME': fnize(line),
                    'OUTPUT_DIR': 'reports',
                    'OUTPUT_FILENAME': fnize(line),
                },
                shortname='flame-%s' % fnize(line)))
        cleanup.append(jobset.JobSpec(['rm', '%s-perf.data' % fnize(line)]))
        cleanup.append(jobset.JobSpec(['rm', '%s-out.perf' % fnize(line)]))
        # periodically flush out the list of jobs: temporary space required for this
        # processing is large
        if len(benchmarks) >= 20:
            # run up to half the cpu count: each benchmark can use up to two cores
            # (one for the microbenchmark, one for the data flush)
            jobset.run(benchmarks, maxjobs=1)
            jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
            jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
            benchmarks = []
            profile_analysis = []
            cleanup = []
    # run the remaining benchmarks that weren't flushed
    if len(benchmarks):
        jobset.run(benchmarks, maxjobs=1)
        jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
        jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
def run_summary(bm_name, cfg, base_json_name):
    """Build `bm_name` in config `cfg`, run it, and return its stdout.

    Writes '<base_json_name>.<cfg>.json' as a side effect.
    NOTE(review): reads the module-level `args` (parsed at the bottom of
    this script) for --summary_time rather than taking it as a parameter.
    """
    subprocess.check_call([
        'make', bm_name,
        'CONFIG=%s' % cfg, '-j',
        '%d' % multiprocessing.cpu_count()
    ])
    cmd = [
        'bins/%s/%s' % (cfg, bm_name),
        '--benchmark_out=%s.%s.json' % (base_json_name, cfg),
        '--benchmark_out_format=json'
    ]
    if args.summary_time is not None:
        cmd += ['--benchmark_min_time=%d' % args.summary_time]
    return subprocess.check_output(cmd)
def collect_summary(bm_name, args):
    """Run `bm_name` in both 'opt' and 'counters' configs, append the raw
    output to the report, and optionally upload a merged CSV to BigQuery."""
    heading('Summary: %s [no counters]' % bm_name)
    text(run_summary(bm_name, 'opt', bm_name))
    heading('Summary: %s [with counters]' % bm_name)
    text(run_summary(bm_name, 'counters', bm_name))
    if args.bigquery_upload:
        # Merge the two JSON outputs into one CSV row set and bq-load it.
        with open('%s.csv' % bm_name, 'w') as f:
            f.write(
                subprocess.check_output([
                    'tools/profiling/microbenchmarks/bm2bq.py',
                    '%s.counters.json' % bm_name,
                    '%s.opt.json' % bm_name
                ]))
        subprocess.check_call([
            'bq', 'load', 'microbenchmarks.microbenchmarks',
            '%s.csv' % bm_name
        ])
# Dispatch table mapping --collect names to their implementations.
collectors = {
    'latency': collect_latency,
    'perf': collect_perf,
    'summary': collect_summary,
}

argp = argparse.ArgumentParser(description='Collect data from microbenchmarks')
argp.add_argument(
    '-c',
    '--collect',
    choices=sorted(collectors.keys()),
    nargs='*',
    default=sorted(collectors.keys()),
    help='Which collectors should be run against each benchmark')
argp.add_argument(
    '-b',
    '--benchmarks',
    choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
    default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
    nargs='+',
    type=str,
    help='Which microbenchmarks should be run')
argp.add_argument(
    '--bigquery_upload',
    default=False,
    action='store_const',
    const=True,
    help='Upload results from summary collection to bigquery')
argp.add_argument(
    '--summary_time',
    default=None,
    type=int,
    help='Minimum time to run benchmarks for the summary collection')
args = argp.parse_args()

try:
    for collect in args.collect:
        for bm_name in args.benchmarks:
            collectors[collect](bm_name, args)
finally:
    # Always finalize and write the index page, even on failure/interrupt,
    # so partial results remain browsable.
    if not os.path.exists('reports'):
        os.makedirs('reports')
    index_html += "</body>\n</html>\n"
    with open('reports/index.html', 'w') as f:
        f.write(index_html)
| apache-2.0 |
pplu/botocore | botocore/credentials.py | 2 | 81776 | # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import time
import datetime
import logging
import os
import getpass
import threading
import json
import subprocess
from collections import namedtuple
from copy import deepcopy
from hashlib import sha1
from dateutil.parser import parse
from dateutil.tz import tzlocal, tzutc
import botocore.configloader
import botocore.compat
from botocore import UNSIGNED
from botocore.compat import total_seconds
from botocore.compat import compat_shell_split
from botocore.config import Config
from botocore.exceptions import UnknownCredentialError
from botocore.exceptions import PartialCredentialsError
from botocore.exceptions import ConfigNotFound
from botocore.exceptions import InvalidConfigError
from botocore.exceptions import InfiniteLoopConfigError
from botocore.exceptions import RefreshWithMFAUnsupportedError
from botocore.exceptions import MetadataRetrievalError
from botocore.exceptions import CredentialRetrievalError
from botocore.exceptions import UnauthorizedSSOTokenError
from botocore.utils import InstanceMetadataFetcher, parse_key_val_file
from botocore.utils import ContainerMetadataFetcher
from botocore.utils import FileWebIdentityTokenLoader
from botocore.utils import SSOTokenLoader
logger = logging.getLogger(__name__)

# Immutable snapshot of a credential set.  Handed to request signers so a
# concurrent refresh cannot swap fields out from under an in-flight request.
ReadOnlyCredentials = namedtuple('ReadOnlyCredentials',
                                 ['access_key', 'secret_key', 'token'])
def create_credential_resolver(session, cache=None, region_name=None):
    """Create a default credential resolver.

    This creates a pre-configured credential resolver
    that includes the default lookup chain for
    credentials.

    :param session: The botocore session whose config variables drive the
        chain (profile name, metadata service tuning, etc.).
    :param cache: Optional dict-like cache shared by the providers that
        persist temporary credentials; defaults to a fresh dict.
    :param region_name: Optional region used when providers need to create
        their own clients (e.g. for AssumeRole calls).
    :return: A ``CredentialResolver`` wrapping the ordered provider chain.
    """
    profile_name = session.get_config_variable('profile') or 'default'
    metadata_timeout = session.get_config_variable('metadata_service_timeout')
    num_attempts = session.get_config_variable('metadata_service_num_attempts')
    # A profile set via instance variable (not env var) means the user was
    # explicit; this later removes the EnvProvider from the chain.
    disable_env_vars = session.instance_variables().get('profile') is not None

    imds_config = {
        'ec2_metadata_service_endpoint': session.get_config_variable(
            'ec2_metadata_service_endpoint'),
        'imds_use_ipv6': session.get_config_variable('imds_use_ipv6')
    }

    if cache is None:
        cache = {}

    env_provider = EnvProvider()
    container_provider = ContainerProvider()
    instance_metadata_provider = InstanceMetadataProvider(
        iam_role_fetcher=InstanceMetadataFetcher(
            timeout=metadata_timeout,
            num_attempts=num_attempts,
            user_agent=session.user_agent(),
            config=imds_config)
    )

    profile_provider_builder = ProfileProviderBuilder(
        session, cache=cache, region_name=region_name)
    assume_role_provider = AssumeRoleProvider(
        load_config=lambda: session.full_config,
        client_creator=_get_client_creator(session, region_name),
        cache=cache,
        profile_name=profile_name,
        credential_sourcer=CanonicalNameCredentialSourcer([
            env_provider, container_provider, instance_metadata_provider
        ]),
        profile_provider_builder=profile_provider_builder,
    )

    # Provider order defines credential precedence: env vars, assume-role,
    # profile-based providers, then legacy/instance providers.
    pre_profile = [
        env_provider,
        assume_role_provider,
    ]
    profile_providers = profile_provider_builder.providers(
        profile_name=profile_name,
        disable_env_vars=disable_env_vars,
    )
    post_profile = [
        OriginalEC2Provider(),
        BotoProvider(),
        container_provider,
        instance_metadata_provider,
    ]
    providers = pre_profile + profile_providers + post_profile

    if disable_env_vars:
        # An explicitly provided profile will negate an EnvProvider.
        # We will defer to providers that understand the "profile"
        # concept to retrieve credentials.
        # The one edge case is if all three values are provided via
        # env vars:
        # export AWS_ACCESS_KEY_ID=foo
        # export AWS_SECRET_ACCESS_KEY=bar
        # export AWS_PROFILE=baz
        # Then, just like our client() calls, the explicit credentials
        # will take precedence.
        #
        # This precedence is enforced by leaving the EnvProvider in the chain.
        # This means that the only way a "profile" would win is if the
        # EnvProvider does not return credentials, which is what we want
        # in this scenario.
        providers.remove(env_provider)
        logger.debug('Skipping environment variable credential check'
                     ' because profile name was explicitly set.')

    resolver = CredentialResolver(providers=providers)
    return resolver
class ProfileProviderBuilder(object):
    """This class handles the creation of profile based providers.

    NOTE: This class is only intended for internal use.

    This class handles the creation and ordering of the various credential
    providers that primarily source their configuration from the shared
    config. This is needed to enable sharing between the default credential
    chain and the source profile chain created by the assume role provider.
    """
    def __init__(self, session, cache=None, region_name=None,
                 sso_token_cache=None):
        # session: botocore session used to look up config file locations
        # cache: shared credential cache passed down to cache-aware providers
        # region_name: region for clients the providers may need to create
        # sso_token_cache: optional token cache for the SSO provider
        self._session = session
        self._cache = cache
        self._region_name = region_name
        self._sso_token_cache = sso_token_cache

    def providers(self, profile_name, disable_env_vars=False):
        """Return the ordered list of profile-based providers.

        The order here defines lookup precedence within the profile section
        of the chain (web identity, SSO, shared credentials, process, config).
        """
        return [
            self._create_web_identity_provider(
                profile_name, disable_env_vars,
            ),
            self._create_sso_provider(profile_name),
            self._create_shared_credential_provider(profile_name),
            self._create_process_provider(profile_name),
            self._create_config_provider(profile_name),
        ]

    def _create_process_provider(self, profile_name):
        # Provider that shells out to a user-configured credential_process.
        return ProcessProvider(
            profile_name=profile_name,
            load_config=lambda: self._session.full_config,
        )

    def _create_shared_credential_provider(self, profile_name):
        # Provider for ~/.aws/credentials (or its configured override).
        credential_file = self._session.get_config_variable('credentials_file')
        return SharedCredentialProvider(
            profile_name=profile_name,
            creds_filename=credential_file,
        )

    def _create_config_provider(self, profile_name):
        # Provider for credentials embedded in ~/.aws/config.
        config_file = self._session.get_config_variable('config_file')
        return ConfigProvider(
            profile_name=profile_name,
            config_filename=config_file,
        )

    def _create_web_identity_provider(self, profile_name, disable_env_vars):
        return AssumeRoleWithWebIdentityProvider(
            load_config=lambda: self._session.full_config,
            client_creator=_get_client_creator(
                self._session, self._region_name),
            cache=self._cache,
            profile_name=profile_name,
            disable_env_vars=disable_env_vars,
        )

    def _create_sso_provider(self, profile_name):
        return SSOProvider(
            load_config=lambda: self._session.full_config,
            client_creator=self._session.create_client,
            profile_name=profile_name,
            cache=self._cache,
            token_cache=self._sso_token_cache,
        )
def get_credentials(session):
    """Resolve credentials for *session* via the default provider chain."""
    return create_credential_resolver(session).load_credentials()
def _local_now():
    """Return a timezone-aware "now" in the machine's local timezone."""
    current = datetime.datetime.now(tz=tzlocal())
    return current
def _parse_if_needed(value):
if isinstance(value, datetime.datetime):
return value
return parse(value)
def _serialize_if_needed(value, iso=False):
if isinstance(value, datetime.datetime):
if iso:
return value.isoformat()
return value.strftime('%Y-%m-%dT%H:%M:%S%Z')
return value
def _get_client_creator(session, region_name):
def client_creator(service_name, **kwargs):
create_client_kwargs = {
'region_name': region_name
}
create_client_kwargs.update(**kwargs)
return session.create_client(service_name, **create_client_kwargs)
return client_creator
def create_assume_role_refresher(client, params):
    """Return a refresh() callable that performs sts:AssumeRole with *params*
    and maps the response into the refreshable-credentials dict format."""
    def refresh():
        creds = client.assume_role(**params)['Credentials']
        # We need to normalize the credential names to
        # the values expected by the refresh creds.
        return {
            'access_key': creds['AccessKeyId'],
            'secret_key': creds['SecretAccessKey'],
            'token': creds['SessionToken'],
            'expiry_time': _serialize_if_needed(creds['Expiration']),
        }
    return refresh
def create_mfa_serial_refresher(actual_refresh):
    """Wrap *actual_refresh* so it can be invoked exactly once.

    MFA-based temporary credentials cannot be refreshed without reprompting
    for a token, so the second invocation raises
    RefreshWithMFAUnsupportedError instead of silently re-running.
    """
    state = {'called': False}

    def _refresh_once():
        if state['called']:
            # We can explore an option in the future to support
            # reprompting for MFA, but for now we just error out
            # when the temp creds expire.
            raise RefreshWithMFAUnsupportedError()
        state['called'] = True
        return actual_refresh()

    return _refresh_once
class JSONFileCache(object):
    """JSON file cache.

    A dict-like store that persists JSON serializable objects, one file
    per key, under a working directory.  Values can be retrieved in a
    later process.
    """

    CACHE_DIR = os.path.expanduser(os.path.join('~', '.aws', 'boto', 'cache'))

    def __init__(self, working_dir=CACHE_DIR, dumps_func=None):
        self._working_dir = working_dir
        self._dumps = self._default_dumps if dumps_func is None else dumps_func

    def _default_dumps(self, obj):
        # Datetimes (e.g. credential expirations) are serialized via
        # _serialize_if_needed.
        return json.dumps(obj, default=_serialize_if_needed)

    def __contains__(self, cache_key):
        return os.path.isfile(self._convert_cache_key(cache_key))

    def __getitem__(self, cache_key):
        """Retrieve value from a cache key."""
        path = self._convert_cache_key(cache_key)
        try:
            with open(path) as f:
                return json.load(f)
        except (OSError, ValueError, IOError):
            # Missing or corrupt entries look like absent keys.
            raise KeyError(cache_key)

    def __setitem__(self, cache_key, value):
        path = self._convert_cache_key(cache_key)
        try:
            serialized = self._dumps(value)
        except (TypeError, ValueError):
            raise ValueError("Value cannot be cached, must be "
                             "JSON serializable: %s" % value)
        if not os.path.isdir(self._working_dir):
            os.makedirs(self._working_dir)
        # 0o600: cached credentials must not be readable by other users.
        with os.fdopen(os.open(path, os.O_WRONLY | os.O_CREAT, 0o600),
                       'w') as f:
            f.truncate()
            f.write(serialized)

    def _convert_cache_key(self, cache_key):
        return os.path.join(self._working_dir, cache_key + '.json')
class Credentials(object):
    """
    Holds the credentials needed to authenticate requests.

    :ivar access_key: The access key part of the credentials.
    :ivar secret_key: The secret key part of the credentials.
    :ivar token: The security token, valid only for session credentials.
    :ivar method: A string which identifies where the credentials
        were found.
    """

    def __init__(self, access_key, secret_key, token=None, method=None):
        self.access_key = access_key
        self.secret_key = secret_key
        self.token = token
        self.method = 'explicit' if method is None else method
        self._normalize()

    def _normalize(self):
        # Keys occasionally (accidentally) contain non-ascii characters,
        # which triggers a confusing UnicodeDecodeError on Python 2.
        # Coercing to unicode here avoids that and matches Python 3
        # behavior; the service ultimately decides whether to accept them.
        self.access_key = botocore.compat.ensure_unicode(self.access_key)
        self.secret_key = botocore.compat.ensure_unicode(self.secret_key)

    def get_frozen_credentials(self):
        # Immutable snapshot of the three credential parts.
        return ReadOnlyCredentials(
            self.access_key, self.secret_key, self.token)
class RefreshableCredentials(Credentials):
    """
    Holds the credentials needed to authenticate requests. In addition, it
    knows how to refresh itself.

    :ivar access_key: The access key part of the credentials.
    :ivar secret_key: The secret key part of the credentials.
    :ivar token: The security token, valid only for session credentials.
    :ivar method: A string which identifies where the credentials
        were found.
    """
    # The time at which we'll attempt to refresh, but not
    # block if someone else is refreshing.
    _advisory_refresh_timeout = 15 * 60
    # The time at which all threads will block waiting for
    # refreshed credentials.
    _mandatory_refresh_timeout = 10 * 60

    def __init__(self, access_key, secret_key, token,
                 expiry_time, refresh_using, method,
                 time_fetcher=_local_now):
        # refresh_using: callable returning a dict with keys
        # access_key/secret_key/token/expiry_time (see _set_from_data).
        self._refresh_using = refresh_using
        self._access_key = access_key
        self._secret_key = secret_key
        self._token = token
        self._expiry_time = expiry_time
        # time_fetcher is injectable (e.g. for tests); defaults to local now.
        self._time_fetcher = time_fetcher
        self._refresh_lock = threading.Lock()
        self.method = method
        # Cached immutable snapshot handed out by get_frozen_credentials();
        # replaced atomically after each successful refresh.
        self._frozen_credentials = ReadOnlyCredentials(
            access_key, secret_key, token)
        self._normalize()

    def _normalize(self):
        # Works on the private attributes (unlike the base class) so the
        # property getters, which can trigger a refresh, are not invoked.
        self._access_key = botocore.compat.ensure_unicode(self._access_key)
        self._secret_key = botocore.compat.ensure_unicode(self._secret_key)

    @classmethod
    def create_from_metadata(cls, metadata, refresh_using, method):
        # Alternate constructor for metadata dicts shaped like the
        # payload returned by refresh_using.
        instance = cls(
            access_key=metadata['access_key'],
            secret_key=metadata['secret_key'],
            token=metadata['token'],
            expiry_time=cls._expiry_datetime(metadata['expiry_time']),
            method=method,
            refresh_using=refresh_using
        )
        return instance

    @property
    def access_key(self):
        """Warning: Using this property can lead to race conditions if you
        access another property subsequently along the refresh boundary.
        Please use get_frozen_credentials instead.
        """
        self._refresh()
        return self._access_key

    @access_key.setter
    def access_key(self, value):
        self._access_key = value

    @property
    def secret_key(self):
        """Warning: Using this property can lead to race conditions if you
        access another property subsequently along the refresh boundary.
        Please use get_frozen_credentials instead.
        """
        self._refresh()
        return self._secret_key

    @secret_key.setter
    def secret_key(self, value):
        self._secret_key = value

    @property
    def token(self):
        """Warning: Using this property can lead to race conditions if you
        access another property subsequently along the refresh boundary.
        Please use get_frozen_credentials instead.
        """
        self._refresh()
        return self._token

    @token.setter
    def token(self, value):
        self._token = value

    def _seconds_remaining(self):
        # May be negative once the credentials are already expired.
        delta = self._expiry_time - self._time_fetcher()
        return total_seconds(delta)

    def refresh_needed(self, refresh_in=None):
        """Check if a refresh is needed.

        A refresh is needed if the expiry time associated
        with the temporary credentials is less than the
        provided ``refresh_in``.  If ``time_delta`` is not
        provided, ``self.advisory_refresh_needed`` will be used.

        For example, if your temporary credentials expire
        in 10 minutes and the provided ``refresh_in`` is
        ``15 * 60``, then this function will return ``True``.

        :type refresh_in: int
        :param refresh_in: The number of seconds before the
            credentials expire in which refresh attempts should
            be made.

        :return: True if refresh needed, False otherwise.
        """
        if self._expiry_time is None:
            # No expiration, so assume we don't need to refresh.
            return False
        if refresh_in is None:
            refresh_in = self._advisory_refresh_timeout
        # The credentials should be refreshed if they're going to expire
        # in less than 5 minutes.
        if self._seconds_remaining() >= refresh_in:
            # There's enough time left. Don't refresh.
            return False
        logger.debug("Credentials need to be refreshed.")
        return True

    def _is_expired(self):
        # Checks if the current credentials are expired.
        return self.refresh_needed(refresh_in=0)

    def _refresh(self):
        # In the common case where we don't need a refresh, we
        # can immediately exit and not require acquiring the
        # refresh lock.
        if not self.refresh_needed(self._advisory_refresh_timeout):
            return

        # acquire() doesn't accept kwargs, but False is indicating
        # that we should not block if we can't acquire the lock.
        # If we aren't able to acquire the lock, we'll trigger
        # the else clause.
        if self._refresh_lock.acquire(False):
            try:
                # Re-check under the lock: another thread may have already
                # refreshed while we were acquiring.
                if not self.refresh_needed(self._advisory_refresh_timeout):
                    return
                is_mandatory_refresh = self.refresh_needed(
                    self._mandatory_refresh_timeout)
                self._protected_refresh(is_mandatory=is_mandatory_refresh)
                return
            finally:
                self._refresh_lock.release()
        elif self.refresh_needed(self._mandatory_refresh_timeout):
            # If we're within the mandatory refresh window,
            # we must block until we get refreshed credentials.
            with self._refresh_lock:
                if not self.refresh_needed(self._mandatory_refresh_timeout):
                    return
                self._protected_refresh(is_mandatory=True)

    def _protected_refresh(self, is_mandatory):
        # precondition: this method should only be called if you've acquired
        # the self._refresh_lock.
        try:
            metadata = self._refresh_using()
        except Exception as e:
            period_name = 'mandatory' if is_mandatory else 'advisory'
            logger.warning("Refreshing temporary credentials failed "
                           "during %s refresh period.",
                           period_name, exc_info=True)
            if is_mandatory:
                # If this is a mandatory refresh, then
                # all errors that occur when we attempt to refresh
                # credentials are propagated back to the user.
                raise
            # Otherwise we'll just return.
            # The end result will be that we'll use the current
            # set of temporary credentials we have.
            return
        self._set_from_data(metadata)
        # Publish a fresh immutable snapshot for get_frozen_credentials().
        self._frozen_credentials = ReadOnlyCredentials(
            self._access_key, self._secret_key, self._token)
        if self._is_expired():
            # We successfully refreshed credentials but for whatever
            # reason, our refreshing function returned credentials
            # that are still expired.  In this scenario, the only
            # thing we can do is let the user know and raise
            # an exception.
            msg = ("Credentials were refreshed, but the "
                   "refreshed credentials are still expired.")
            logger.warning(msg)
            raise RuntimeError(msg)

    @staticmethod
    def _expiry_datetime(time_str):
        # Parse an expiry timestamp string into a datetime.
        return parse(time_str)

    def _set_from_data(self, data):
        # Validate and apply a refresh payload; all four keys must be present.
        expected_keys = ['access_key', 'secret_key', 'token', 'expiry_time']
        if not data:
            missing_keys = expected_keys
        else:
            missing_keys = [k for k in expected_keys if k not in data]
        if missing_keys:
            message = "Credential refresh failed, response did not contain: %s"
            raise CredentialRetrievalError(
                provider=self.method,
                error_msg=message % ', '.join(missing_keys),
            )
        self.access_key = data['access_key']
        self.secret_key = data['secret_key']
        self.token = data['token']
        self._expiry_time = parse(data['expiry_time'])
        logger.debug("Retrieved credentials will expire at: %s",
                     self._expiry_time)
        self._normalize()

    def get_frozen_credentials(self):
        """Return immutable credentials.

        The ``access_key``, ``secret_key``, and ``token`` properties
        on this class will always check and refresh credentials if
        needed before returning the particular credentials.

        This has an edge case where you can get inconsistent
        credentials.  Imagine this:

            # Current creds are "t1"
            tmp.access_key  --->  expired? no, so return t1.access_key
            # ---- time is now expired, creds need refreshing to "t2" ----
            tmp.secret_key  --->  expired? yes, refresh and return t2.secret_key

        This means we're using the access key from t1 with the secret key
        from t2.  To fix this issue, you can request a frozen credential object
        which is guaranteed not to change.

        The frozen credentials returned from this method should be used
        immediately and then discarded.  The typical usage pattern would
        be::

            creds = RefreshableCredentials(...)
            some_code = SomeSignerObject()
            # I'm about to sign the request.
            # The frozen credentials are only used for the
            # duration of generate_presigned_url and will be
            # immediately thrown away.
            request = some_code.sign_some_request(
                with_credentials=creds.get_frozen_credentials())
            print("Signed request:", request)

        """
        self._refresh()
        return self._frozen_credentials
class DeferredRefreshableCredentials(RefreshableCredentials):
    """Refreshable credentials that don't require initial credentials.

    refresh_using will be called upon first access.
    """

    def __init__(self, refresh_using, method, time_fetcher=_local_now):
        # Deliberately does NOT call the parent __init__: there are no
        # initial credentials to seed, so every field starts empty and
        # the first attribute access triggers a refresh.
        self._refresh_using = refresh_using
        self._time_fetcher = time_fetcher
        self._refresh_lock = threading.Lock()
        self.method = method
        self._access_key = None
        self._secret_key = None
        self._token = None
        self._expiry_time = None
        self._frozen_credentials = None

    def refresh_needed(self, refresh_in=None):
        # Before the first successful refresh there is nothing cached,
        # so a refresh is always required.
        if self._frozen_credentials is None:
            return True
        return super(DeferredRefreshableCredentials, self).refresh_needed(
            refresh_in)
class CachedCredentialFetcher(object):
    """Base class for credential fetchers backed by a cache.

    Subclasses implement ``_create_cache_key`` and ``_get_credentials``;
    this class handles cache lookup, expiry checking, and write-back.
    """

    DEFAULT_EXPIRY_WINDOW_SECONDS = 60 * 15

    def __init__(self, cache=None, expiry_window_seconds=None):
        self._cache = {} if cache is None else cache
        self._cache_key = self._create_cache_key()
        if expiry_window_seconds is None:
            expiry_window_seconds = self.DEFAULT_EXPIRY_WINDOW_SECONDS
        self._expiry_window_seconds = expiry_window_seconds

    def _create_cache_key(self):
        raise NotImplementedError('_create_cache_key()')

    def _make_file_safe(self, filename):
        # Replace :, path sep, and / to make it the string filename safe.
        safe = filename.replace(':', '_').replace(os.path.sep, '_')
        return safe.replace('/', '_')

    def _get_credentials(self):
        raise NotImplementedError('_get_credentials()')

    def fetch_credentials(self):
        return self._get_cached_credentials()

    def _get_cached_credentials(self):
        """Get up-to-date credentials.

        This will check the cache for up-to-date credentials, calling assume
        role if none are available.
        """
        response = self._load_from_cache()
        if response is None:
            response = self._get_credentials()
            self._write_to_cache(response)
        else:
            logger.debug("Credentials for role retrieved from cache.")
        creds = response['Credentials']
        return {
            'access_key': creds['AccessKeyId'],
            'secret_key': creds['SecretAccessKey'],
            'token': creds['SessionToken'],
            'expiry_time': _serialize_if_needed(creds['Expiration'], iso=True),
        }

    def _load_from_cache(self):
        if self._cache_key not in self._cache:
            return None
        # Deep-copy so callers can't mutate the cached entry in place.
        creds = deepcopy(self._cache[self._cache_key])
        if self._is_expired(creds):
            logger.debug(
                "Credentials were found in cache, but they are expired."
            )
            return None
        return creds

    def _write_to_cache(self, response):
        self._cache[self._cache_key] = deepcopy(response)

    def _is_expired(self, credentials):
        """Check if credentials are expired."""
        end_time = _parse_if_needed(credentials['Credentials']['Expiration'])
        remaining = total_seconds(end_time - _local_now())
        # Treat credentials inside the expiry window as already expired.
        return remaining < self._expiry_window_seconds
class BaseAssumeRoleCredentialFetcher(CachedCredentialFetcher):
    """Shared setup for fetchers that call one of the AssumeRole APIs."""

    def __init__(self, client_creator, role_arn, extra_args=None,
                 cache=None, expiry_window_seconds=None):
        self._client_creator = client_creator
        self._role_arn = role_arn
        self._assume_kwargs = {} if extra_args is None else deepcopy(extra_args)
        self._assume_kwargs['RoleArn'] = self._role_arn
        self._role_session_name = self._assume_kwargs.get('RoleSessionName')
        self._using_default_session_name = False
        if not self._role_session_name:
            self._generate_assume_role_name()
        super(BaseAssumeRoleCredentialFetcher, self).__init__(
            cache, expiry_window_seconds)

    def _generate_assume_role_name(self):
        # Fall back to a timestamped session name when the caller
        # did not supply one.
        self._role_session_name = 'botocore-session-%s' % (int(time.time()))
        self._assume_kwargs['RoleSessionName'] = self._role_session_name
        self._using_default_session_name = True

    def _create_cache_key(self):
        """Create a predictable cache key for the current configuration.

        The cache key is intended to be compatible with file names.
        """
        args = deepcopy(self._assume_kwargs)
        # The generated session name is effectively random, so it must
        # not influence the hash.
        if self._using_default_session_name:
            del args['RoleSessionName']
        if 'Policy' in args:
            # Load the policy so json.dumps can emit its keys sorted,
            # keeping the hash predictable.
            args['Policy'] = json.loads(args['Policy'])
        serialized = json.dumps(args, sort_keys=True)
        digest = sha1(serialized.encode('utf-8')).hexdigest()
        return self._make_file_safe(digest)
class AssumeRoleCredentialFetcher(BaseAssumeRoleCredentialFetcher):
    def __init__(self, client_creator, source_credentials, role_arn,
                 extra_args=None, mfa_prompter=None, cache=None,
                 expiry_window_seconds=None):
        """
        :type client_creator: callable
        :param client_creator: A callable that creates a client taking
            arguments like ``Session.create_client``.

        :type source_credentials: Credentials
        :param source_credentials: The credentials to use to create the
            client for the call to AssumeRole.

        :type role_arn: str
        :param role_arn: The ARN of the role to be assumed.

        :type extra_args: dict
        :param extra_args: Any additional arguments to add to the assume
            role request using the format of the botocore operation.
            Possible keys include, but may not be limited to,
            DurationSeconds, Policy, SerialNumber, ExternalId and
            RoleSessionName.

        :type mfa_prompter: callable
        :param mfa_prompter: A callable that returns input provided by the
            user (i.e raw_input, getpass.getpass, etc.).

        :type cache: dict
        :param cache: An object that supports ``__getitem__``,
            ``__setitem__``, and ``__contains__``.  An example of this is
            the ``JSONFileCache`` class in aws-cli.

        :type expiry_window_seconds: int
        :param expiry_window_seconds: The amount of time, in seconds,
            before the credentials' actual expiry at which they are
            already treated as expired (and therefore refreshed).
        """
        self._source_credentials = source_credentials
        self._mfa_prompter = mfa_prompter
        if self._mfa_prompter is None:
            # Default to an echo-free prompt for MFA codes.
            self._mfa_prompter = getpass.getpass
        super(AssumeRoleCredentialFetcher, self).__init__(
            client_creator, role_arn, extra_args=extra_args,
            cache=cache, expiry_window_seconds=expiry_window_seconds
        )

    def _get_credentials(self):
        """Get credentials by calling assume role."""
        kwargs = self._assume_role_kwargs()
        client = self._create_client()
        return client.assume_role(**kwargs)

    def _assume_role_kwargs(self):
        """Get the arguments for assume role based on current configuration."""
        assume_role_kwargs = deepcopy(self._assume_kwargs)
        mfa_serial = assume_role_kwargs.get('SerialNumber')
        if mfa_serial is not None:
            # An MFA device is configured; prompt for the one-time code.
            prompt = 'Enter MFA code for %s: ' % mfa_serial
            token_code = self._mfa_prompter(prompt)
            assume_role_kwargs['TokenCode'] = token_code
        # DurationSeconds, if present in self._assume_kwargs, is passed
        # through unchanged.  (A previous revision read it back and
        # re-assigned it to itself, which was a no-op and has been removed.)
        return assume_role_kwargs

    def _create_client(self):
        """Create an STS client using the source credentials."""
        frozen_credentials = self._source_credentials.get_frozen_credentials()
        return self._client_creator(
            'sts',
            aws_access_key_id=frozen_credentials.access_key,
            aws_secret_access_key=frozen_credentials.secret_key,
            aws_session_token=frozen_credentials.token,
        )
class AssumeRoleWithWebIdentityCredentialFetcher(
        BaseAssumeRoleCredentialFetcher):
    def __init__(self, client_creator, web_identity_token_loader, role_arn,
                 extra_args=None, cache=None, expiry_window_seconds=None):
        """
        :type client_creator: callable
        :param client_creator: A callable that creates a client taking
            arguments like ``Session.create_client``.

        :type web_identity_token_loader: callable
        :param web_identity_token_loader: A callable that takes no arguments
            and returns a web identity token str.

        :type role_arn: str
        :param role_arn: The ARN of the role to be assumed.

        :type extra_args: dict
        :param extra_args: Any additional arguments to add to the assume
            role request using the format of the botocore operation.
            Possible keys include, but may not be limited to,
            DurationSeconds, Policy, SerialNumber, ExternalId and
            RoleSessionName.

        :type cache: dict
        :param cache: An object that supports ``__getitem__``,
            ``__setitem__``, and ``__contains__``.  An example of this is
            the ``JSONFileCache`` class in aws-cli.

        :type expiry_window_seconds: int
        :param expiry_window_seconds: The amount of time, in seconds,
        """
        self._web_identity_token_loader = web_identity_token_loader
        super(AssumeRoleWithWebIdentityCredentialFetcher, self).__init__(
            client_creator, role_arn, extra_args=extra_args,
            cache=cache, expiry_window_seconds=expiry_window_seconds
        )

    def _get_credentials(self):
        """Get credentials by calling assume role."""
        # The web-identity variant authenticates with the token alone,
        # so configure the STS client to skip request signing.
        sts = self._client_creator(
            'sts', config=Config(signature_version=UNSIGNED))
        return sts.assume_role_with_web_identity(**self._assume_role_kwargs())

    def _assume_role_kwargs(self):
        """Get the arguments for assume role based on current configuration."""
        kwargs = deepcopy(self._assume_kwargs)
        kwargs['WebIdentityToken'] = self._web_identity_token_loader()
        return kwargs
class CredentialProvider(object):
    """Base class for a single source of credentials."""

    # A short name to identify the provider within botocore.
    METHOD = None

    # A name to identify the provider for use in cross-sdk features like
    # assume role's `credential_source` configuration option. These names
    # are to be treated in a case-insensitive way. NOTE: any providers not
    # implemented in botocore MUST prefix their canonical names with
    # 'custom' or we DO NOT guarantee that it will work with any features
    # that this provides.
    CANONICAL_NAME = None

    def __init__(self, session=None):
        self.session = session

    def load(self):
        """
        Loads the credentials from their source & sets them on the object.

        Subclasses should implement this method (by reading from disk, the
        environment, the network or wherever), returning ``True`` if they were
        found & loaded.

        If not found, this method should return ``False``, indictating that the
        ``CredentialResolver`` should fall back to the next available method.

        The default implementation does nothing, assuming the user has set the
        ``access_key/secret_key/token`` themselves.

        :returns: Whether credentials were found & set
        :rtype: Credentials
        """
        return True

    def _extract_creds_from_mapping(self, mapping, *key_names):
        # Collect each requested key; a missing key is a partial-credential
        # configuration and is reported as such.
        values = []
        for key_name in key_names:
            try:
                values.append(mapping[key_name])
            except KeyError:
                raise PartialCredentialsError(
                    provider=self.METHOD, cred_var=key_name)
        return values
class ProcessProvider(CredentialProvider):
    """Loads credentials by running the profile's ``credential_process``."""

    METHOD = 'custom-process'

    def __init__(self, profile_name, load_config, popen=subprocess.Popen):
        self._profile_name = profile_name
        self._load_config = load_config
        self._loaded_config = None
        self._popen = popen

    def load(self):
        credential_process = self._credential_process
        if credential_process is None:
            return
        creds_dict = self._retrieve_credentials_using(credential_process)
        if creds_dict.get('expiry_time') is None:
            # Static credentials; no refresh machinery needed.
            return Credentials(
                access_key=creds_dict['access_key'],
                secret_key=creds_dict['secret_key'],
                token=creds_dict.get('token'),
                method=self.METHOD
            )
        # Expiring credentials: re-run the process on each refresh.
        return RefreshableCredentials.create_from_metadata(
            creds_dict,
            lambda: self._retrieve_credentials_using(credential_process),
            self.METHOD
        )

    def _retrieve_credentials_using(self, credential_process):
        # shell=False, so the command string must be split into argv here.
        argv = compat_shell_split(credential_process)
        process = self._popen(argv,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE)
        stdout, stderr = process.communicate()
        if process.returncode != 0:
            raise CredentialRetrievalError(
                provider=self.METHOD, error_msg=stderr.decode('utf-8'))
        parsed = botocore.compat.json.loads(stdout.decode('utf-8'))
        version = parsed.get('Version', '<Version key not provided>')
        if version != 1:
            raise CredentialRetrievalError(
                provider=self.METHOD,
                error_msg=("Unsupported version '%s' for credential process "
                           "provider, supported versions: 1" % version))
        try:
            return {
                'access_key': parsed['AccessKeyId'],
                'secret_key': parsed['SecretAccessKey'],
                'token': parsed.get('SessionToken'),
                'expiry_time': parsed.get('Expiration'),
            }
        except KeyError as e:
            raise CredentialRetrievalError(
                provider=self.METHOD,
                error_msg="Missing required key in response: %s" % e
            )

    @property
    def _credential_process(self):
        # The profile config is loaded lazily and memoized.
        if self._loaded_config is None:
            self._loaded_config = self._load_config()
        profile_config = self._loaded_config.get(
            'profiles', {}).get(self._profile_name, {})
        return profile_config.get('credential_process')
class InstanceMetadataProvider(CredentialProvider):
    """Fetches IAM role credentials from the instance metadata service."""

    METHOD = 'iam-role'
    CANONICAL_NAME = 'Ec2InstanceMetadata'

    def __init__(self, iam_role_fetcher):
        self._role_fetcher = iam_role_fetcher

    def load(self):
        fetcher = self._role_fetcher
        # Make the first request up front; if nothing useful comes back,
        # let the resolver move on to the next provider in the chain.
        metadata = fetcher.retrieve_iam_role_credentials()
        if not metadata:
            return None
        logger.debug('Found credentials from IAM Role: %s',
                     metadata['role_name'])
        # Seed the credentials with the data already fetched; they will
        # refresh themselves via the fetcher when the expiry is hit.
        return RefreshableCredentials.create_from_metadata(
            metadata,
            method=self.METHOD,
            refresh_using=fetcher.retrieve_iam_role_credentials,
        )
class EnvProvider(CredentialProvider):
    """Loads credentials from environment variables."""

    METHOD = 'env'
    CANONICAL_NAME = 'Environment'

    ACCESS_KEY = 'AWS_ACCESS_KEY_ID'
    SECRET_KEY = 'AWS_SECRET_ACCESS_KEY'
    # The token can come from either of these env var.
    # AWS_SESSION_TOKEN is what other AWS SDKs have standardized on.
    TOKENS = ['AWS_SECURITY_TOKEN', 'AWS_SESSION_TOKEN']
    EXPIRY_TIME = 'AWS_CREDENTIAL_EXPIRATION'

    def __init__(self, environ=None, mapping=None):
        """
        :param environ: The environment variables (defaults to
            ``os.environ`` if no value is provided).
        :param mapping: An optional mapping of variable names to
            environment variable names.  Use this if you want to
            change the mapping of access_key->AWS_ACCESS_KEY_ID, etc.
            The dict can have up to 3 keys: ``access_key``, ``secret_key``,
            ``session_token``.
        """
        self.environ = os.environ if environ is None else environ
        self._mapping = self._build_mapping(mapping)

    def _build_mapping(self, mapping):
        # Mapping of variable name to env var name.
        if mapping is None:
            # Use the class var defaults.
            return {
                'access_key': self.ACCESS_KEY,
                'secret_key': self.SECRET_KEY,
                'token': self.TOKENS,
                'expiry_time': self.EXPIRY_TIME,
            }
        var_mapping = {
            'access_key': mapping.get('access_key', self.ACCESS_KEY),
            'secret_key': mapping.get('secret_key', self.SECRET_KEY),
            'token': mapping.get('token', self.TOKENS),
            'expiry_time': mapping.get('expiry_time', self.EXPIRY_TIME),
        }
        # The token entry is always handled as a list of candidate vars.
        if not isinstance(var_mapping['token'], list):
            var_mapping['token'] = [var_mapping['token']]
        return var_mapping

    def load(self):
        """
        Search for credentials in explicit environment variables.
        """
        if not self.environ.get(self._mapping['access_key'], ''):
            return None
        logger.info('Found credentials in environment variables.')
        fetcher = self._create_credentials_fetcher()
        credentials = fetcher(require_expiry=False)
        expiry_time = credentials['expiry_time']
        if expiry_time is None:
            return Credentials(
                credentials['access_key'], credentials['secret_key'],
                credentials['token'], method=self.METHOD
            )
        # An expiry was supplied, so hand back refreshable credentials
        # that re-read the environment when they expire.
        return RefreshableCredentials(
            credentials['access_key'], credentials['secret_key'],
            credentials['token'], parse(expiry_time),
            refresh_using=fetcher, method=self.METHOD
        )

    def _create_credentials_fetcher(self):
        mapping = self._mapping
        method = self.METHOD
        environ = self.environ

        def fetch_credentials(require_expiry=True):
            access_key = environ.get(mapping['access_key'], '')
            if not access_key:
                raise PartialCredentialsError(
                    provider=method, cred_var=mapping['access_key'])
            secret_key = environ.get(mapping['secret_key'], '')
            if not secret_key:
                raise PartialCredentialsError(
                    provider=method, cred_var=mapping['secret_key'])
            # First non-empty token variable wins.
            token = None
            for token_env_var in mapping['token']:
                candidate = environ.get(token_env_var, '')
                if candidate:
                    token = candidate
                    break
            # Empty-string expiry is treated the same as unset.
            expiry_time = environ.get(mapping['expiry_time'], '') or None
            if require_expiry and expiry_time is None:
                raise PartialCredentialsError(
                    provider=method, cred_var=mapping['expiry_time'])
            return {
                'access_key': access_key,
                'secret_key': secret_key,
                'token': token,
                'expiry_time': expiry_time,
            }

        return fetch_credentials
class OriginalEC2Provider(CredentialProvider):
    """Searches for a credential file used by the original EC2 CLI tools.

    The file location comes from the env var named by ``CRED_FILE_ENV``
    and the file is a key=value listing containing ``AWSAccessKeyId``
    and ``AWSSecretKey``.
    """

    METHOD = 'ec2-credentials-file'
    CANONICAL_NAME = 'Ec2Config'

    CRED_FILE_ENV = 'AWS_CREDENTIAL_FILE'
    ACCESS_KEY = 'AWSAccessKeyId'
    SECRET_KEY = 'AWSSecretKey'

    def __init__(self, environ=None, parser=None):
        if environ is None:
            environ = os.environ
        if parser is None:
            parser = parse_key_val_file
        self._environ = environ
        self._parser = parser

    def load(self):
        """
        Search for a credential file used by original EC2 CLI tools.
        """
        # Consistency fix: use the CRED_FILE_ENV class constant instead of
        # repeating the 'AWS_CREDENTIAL_FILE' literal (same value).
        if self.CRED_FILE_ENV in self._environ:
            full_path = os.path.expanduser(
                self._environ[self.CRED_FILE_ENV])
            creds = self._parser(full_path)
            if self.ACCESS_KEY in creds:
                logger.info('Found credentials in AWS_CREDENTIAL_FILE.')
                access_key = creds[self.ACCESS_KEY]
                secret_key = creds[self.SECRET_KEY]
                # EC2 creds file doesn't support session tokens.
                return Credentials(access_key, secret_key, method=self.METHOD)
        else:
            return None
class SharedCredentialProvider(CredentialProvider):
    """Loads credentials from the shared credentials (ini) file."""

    METHOD = 'shared-credentials-file'
    CANONICAL_NAME = 'SharedCredentials'

    ACCESS_KEY = 'aws_access_key_id'
    SECRET_KEY = 'aws_secret_access_key'
    # Same deal as the EnvProvider above.  Botocore originally supported
    # aws_security_token, but the SDKs are standardizing on aws_session_token
    # so we support both.
    TOKENS = ['aws_security_token', 'aws_session_token']

    def __init__(self, creds_filename, profile_name=None, ini_parser=None):
        self._creds_filename = creds_filename
        self._profile_name = 'default' if profile_name is None else profile_name
        self._ini_parser = (botocore.configloader.raw_config_parse
                            if ini_parser is None else ini_parser)

    def load(self):
        try:
            available_creds = self._ini_parser(self._creds_filename)
        except ConfigNotFound:
            return None
        if self._profile_name not in available_creds:
            return None
        config = available_creds[self._profile_name]
        if self.ACCESS_KEY not in config:
            return None
        logger.info("Found credentials in shared credentials file: %s",
                    self._creds_filename)
        access_key, secret_key = self._extract_creds_from_mapping(
            config, self.ACCESS_KEY, self.SECRET_KEY)
        token = self._get_session_token(config)
        return Credentials(access_key, secret_key, token,
                           method=self.METHOD)

    def _get_session_token(self, config):
        # First matching token key wins; None when neither is present.
        for token_name in self.TOKENS:
            if token_name in config:
                return config[token_name]
class ConfigProvider(CredentialProvider):
    """INI based config provider with profile sections."""

    METHOD = 'config-file'
    CANONICAL_NAME = 'SharedConfig'

    ACCESS_KEY = 'aws_access_key_id'
    SECRET_KEY = 'aws_secret_access_key'
    # Same deal as the EnvProvider above.  Botocore originally supported
    # aws_security_token, but the SDKs are standardizing on aws_session_token
    # so we support both.
    TOKENS = ['aws_security_token', 'aws_session_token']

    def __init__(self, config_filename, profile_name, config_parser=None):
        """
        :param config_filename: The session configuration scoped to the current
            profile.  This is available via ``session.config``.
        :param profile_name: The name of the current profile.
        :param config_parser: A config parser callable.
        """
        self._config_filename = config_filename
        self._profile_name = profile_name
        self._config_parser = (botocore.configloader.load_config
                               if config_parser is None else config_parser)

    def load(self):
        """
        If there is are credentials in the configuration associated with
        the session, use those.
        """
        try:
            full_config = self._config_parser(self._config_filename)
        except ConfigNotFound:
            return None
        if self._profile_name not in full_config['profiles']:
            return None
        profile_config = full_config['profiles'][self._profile_name]
        if self.ACCESS_KEY not in profile_config:
            return None
        logger.info("Credentials found in config file: %s",
                    self._config_filename)
        access_key, secret_key = self._extract_creds_from_mapping(
            profile_config, self.ACCESS_KEY, self.SECRET_KEY)
        token = self._get_session_token(profile_config)
        return Credentials(access_key, secret_key, token,
                           method=self.METHOD)

    def _get_session_token(self, profile_config):
        # First matching token key wins; None when neither is present.
        for token_name in self.TOKENS:
            if token_name in profile_config:
                return profile_config[token_name]
class BotoProvider(CredentialProvider):
    """Loads credentials from a boto2-style config file."""

    METHOD = 'boto-config'
    CANONICAL_NAME = 'Boto2Config'

    BOTO_CONFIG_ENV = 'BOTO_CONFIG'
    DEFAULT_CONFIG_FILENAMES = ['/etc/boto.cfg', '~/.boto']
    ACCESS_KEY = 'aws_access_key_id'
    SECRET_KEY = 'aws_secret_access_key'

    def __init__(self, environ=None, ini_parser=None):
        self._environ = os.environ if environ is None else environ
        self._ini_parser = (botocore.configloader.raw_config_parse
                            if ini_parser is None else ini_parser)

    def load(self):
        """
        Look for credentials in boto config file.
        """
        # BOTO_CONFIG, when set, replaces the default search locations.
        if self.BOTO_CONFIG_ENV in self._environ:
            candidates = [self._environ[self.BOTO_CONFIG_ENV]]
        else:
            candidates = self.DEFAULT_CONFIG_FILENAMES
        for filename in candidates:
            try:
                config = self._ini_parser(filename)
            except ConfigNotFound:
                # Move on to the next potential config file name.
                continue
            credentials = config.get('Credentials', {})
            if self.ACCESS_KEY not in credentials:
                continue
            logger.info("Found credentials in boto config file: %s",
                        filename)
            access_key, secret_key = self._extract_creds_from_mapping(
                credentials, self.ACCESS_KEY, self.SECRET_KEY)
            return Credentials(access_key, secret_key,
                               method=self.METHOD)
class AssumeRoleProvider(CredentialProvider):
    """Provider that performs an STS ``AssumeRole`` call based on profile
    configuration (``role_arn`` combined with either ``source_profile`` or
    ``credential_source``).
    """
    METHOD = 'assume-role'
    # The AssumeRole provider is logically part of the SharedConfig and
    # SharedCredentials providers. Since the purpose of the canonical name
    # is to provide cross-sdk compatibility, calling code will need to be
    # aware that either of those providers should be tied to the AssumeRole
    # provider as much as possible.
    CANONICAL_NAME = None
    ROLE_CONFIG_VAR = 'role_arn'
    # NOTE: "TOKE" is a long-standing typo in this attribute name; it is
    # kept as-is because renaming it would change the public surface.
    WEB_IDENTITY_TOKE_FILE_VAR = 'web_identity_token_file'
    # Credentials are considered expired (and will be refreshed) once the total
    # remaining time left until the credentials expires is less than the
    # EXPIRY_WINDOW.
    EXPIRY_WINDOW_SECONDS = 60 * 15
    def __init__(self, load_config, client_creator, cache, profile_name,
                 prompter=getpass.getpass, credential_sourcer=None,
                 profile_provider_builder=None):
        """
        :type load_config: callable
        :param load_config: A function that accepts no arguments, and
            when called, will return the full configuration dictionary
            for the session (``session.full_config``).
        :type client_creator: callable
        :param client_creator: A factory function that will create
            a client when called. Has the same interface as
            ``botocore.session.Session.create_client``.
        :type cache: dict
        :param cache: An object that supports ``__getitem__``,
            ``__setitem__``, and ``__contains__``. An example
            of this is the ``JSONFileCache`` class in the CLI.
        :type profile_name: str
        :param profile_name: The name of the profile.
        :type prompter: callable
        :param prompter: A callable that returns input provided
            by the user (i.e raw_input, getpass.getpass, etc.).
        :type credential_sourcer: CanonicalNameCredentialSourcer
        :param credential_sourcer: A credential provider that takes a
            configuration, which is used to provide the source credentials
            for the STS call.
        """
        #: The cache used to first check for assumed credentials.
        #: This is checked before making the AssumeRole API
        #: calls and can be useful if you have short lived
        #: scripts and you'd like to avoid calling AssumeRole
        #: until the credentials are expired.
        self.cache = cache
        self._load_config = load_config
        # client_creator is a callable that creates a client.
        # It's basically session.create_client
        self._client_creator = client_creator
        self._profile_name = profile_name
        self._prompter = prompter
        # The _loaded_config attribute will be populated from the
        # load_config() function once the configuration is actually
        # loaded. The reason we go through all this instead of just
        # requiring that the loaded_config be passed to us is so that
        # we can defer configuration loading until we actually try
        # to load credentials (as opposed to when the object is
        # instantiated).
        self._loaded_config = {}
        self._credential_sourcer = credential_sourcer
        self._profile_provider_builder = profile_provider_builder
        # Chain of profiles visited while resolving source_profile
        # references; used to detect infinite loops.
        self._visited_profiles = [self._profile_name]
    def load(self):
        """Load credentials via AssumeRole if the profile is configured
        for it; return ``None`` otherwise so the chain can continue.
        """
        self._loaded_config = self._load_config()
        profiles = self._loaded_config.get('profiles', {})
        profile = profiles.get(self._profile_name, {})
        if self._has_assume_role_config_vars(profile):
            return self._load_creds_via_assume_role(self._profile_name)
    def _has_assume_role_config_vars(self, profile):
        """Return True when *profile* sets ``role_arn`` but not a web
        identity token file (the latter is another provider's job).
        """
        return (
            self.ROLE_CONFIG_VAR in profile and
            # We need to ensure this provider doesn't look at a profile when
            # the profile has configuration for web identity. Simply relying on
            # the order in the credential chain is insufficient as it doesn't
            # prevent the case when we're doing an assume role chain.
            self.WEB_IDENTITY_TOKE_FILE_VAR not in profile
        )
    def _load_creds_via_assume_role(self, profile_name):
        """Build deferred-refreshable credentials backed by AssumeRole."""
        role_config = self._get_role_config(profile_name)
        source_credentials = self._resolve_source_credentials(
            role_config, profile_name
        )
        # Optional AssumeRole parameters are forwarded only when the
        # profile actually sets them.
        extra_args = {}
        role_session_name = role_config.get('role_session_name')
        if role_session_name is not None:
            extra_args['RoleSessionName'] = role_session_name
        external_id = role_config.get('external_id')
        if external_id is not None:
            extra_args['ExternalId'] = external_id
        mfa_serial = role_config.get('mfa_serial')
        if mfa_serial is not None:
            extra_args['SerialNumber'] = mfa_serial
        duration_seconds = role_config.get('duration_seconds')
        if duration_seconds is not None:
            extra_args['DurationSeconds'] = duration_seconds
        fetcher = AssumeRoleCredentialFetcher(
            client_creator=self._client_creator,
            source_credentials=source_credentials,
            role_arn=role_config['role_arn'],
            extra_args=extra_args,
            mfa_prompter=self._prompter,
            cache=self.cache,
        )
        refresher = fetcher.fetch_credentials
        if mfa_serial is not None:
            refresher = create_mfa_serial_refresher(refresher)
        # The initial credentials are empty and the expiration time is set
        # to now so that we can delay the call to assume role until it is
        # strictly needed.
        return DeferredRefreshableCredentials(
            method=self.METHOD,
            refresh_using=refresher,
            time_fetcher=_local_now
        )
    def _get_role_config(self, profile_name):
        """Retrieves and validates the role configuration for the profile."""
        profiles = self._loaded_config.get('profiles', {})
        profile = profiles[profile_name]
        source_profile = profile.get('source_profile')
        role_arn = profile['role_arn']
        credential_source = profile.get('credential_source')
        mfa_serial = profile.get('mfa_serial')
        external_id = profile.get('external_id')
        role_session_name = profile.get('role_session_name')
        duration_seconds = profile.get('duration_seconds')
        role_config = {
            'role_arn': role_arn,
            'external_id': external_id,
            'mfa_serial': mfa_serial,
            'role_session_name': role_session_name,
            'source_profile': source_profile,
            'credential_source': credential_source
        }
        if duration_seconds is not None:
            try:
                role_config['duration_seconds'] = int(duration_seconds)
            except ValueError:
                # A non-numeric duration is silently ignored rather than
                # treated as a configuration error.
                pass
        # Either the credential source or the source profile must be
        # specified, but not both.
        if credential_source is not None and source_profile is not None:
            raise InvalidConfigError(
                error_msg=(
                    'The profile "%s" contains both source_profile and '
                    'credential_source.' % profile_name
                )
            )
        elif credential_source is None and source_profile is None:
            raise PartialCredentialsError(
                provider=self.METHOD,
                cred_var='source_profile or credential_source'
            )
        elif credential_source is not None:
            self._validate_credential_source(
                profile_name, credential_source)
        else:
            self._validate_source_profile(profile_name, source_profile)
        return role_config
    def _validate_credential_source(self, parent_profile, credential_source):
        """Raise ``InvalidConfigError`` when *credential_source* cannot be
        serviced by the configured credential sourcer.
        """
        if self._credential_sourcer is None:
            raise InvalidConfigError(error_msg=(
                'The credential_source "%s" is specified in profile "%s", '
                'but no source provider was configured.' % (
                    credential_source, parent_profile)
            ))
        if not self._credential_sourcer.is_supported(credential_source):
            raise InvalidConfigError(error_msg=(
                'The credential source "%s" referenced in profile "%s" is not '
                'valid.' % (credential_source, parent_profile)
            ))
    def _source_profile_has_credentials(self, profile):
        """True if *profile* has static creds or its own assume-role config."""
        return any([
            self._has_static_credentials(profile),
            self._has_assume_role_config_vars(profile),
        ])
    def _validate_source_profile(self, parent_profile_name,
                                 source_profile_name):
        """Validate a ``source_profile`` reference, guarding against loops."""
        profiles = self._loaded_config.get('profiles', {})
        if source_profile_name not in profiles:
            raise InvalidConfigError(
                error_msg=(
                    'The source_profile "%s" referenced in '
                    'the profile "%s" does not exist.' % (
                        source_profile_name, parent_profile_name)
                )
            )
        source_profile = profiles[source_profile_name]
        # Make sure we aren't going into an infinite loop. If we haven't
        # visited the profile yet, we're good.
        if source_profile_name not in self._visited_profiles:
            return
        # If we have visited the profile and the profile isn't simply
        # referencing itself, that's an infinite loop.
        if source_profile_name != parent_profile_name:
            raise InfiniteLoopConfigError(
                source_profile=source_profile_name,
                visited_profiles=self._visited_profiles
            )
        # A profile is allowed to reference itself so that it can source
        # static credentials and have configuration all in the same
        # profile. This will only ever work for the top level assume
        # role because the static credentials will otherwise take
        # precedence.
        if not self._has_static_credentials(source_profile):
            raise InfiniteLoopConfigError(
                source_profile=source_profile_name,
                visited_profiles=self._visited_profiles
            )
    def _has_static_credentials(self, profile):
        """True if *profile* contains either static credential key."""
        static_keys = ['aws_secret_access_key', 'aws_access_key_id']
        return any(static_key in profile for static_key in static_keys)
    def _resolve_source_credentials(self, role_config, profile_name):
        """Resolve the credentials used to make the AssumeRole call."""
        credential_source = role_config.get('credential_source')
        if credential_source is not None:
            return self._resolve_credentials_from_source(
                credential_source, profile_name
            )
        source_profile = role_config['source_profile']
        # Record the hop so _validate_source_profile can detect cycles.
        self._visited_profiles.append(source_profile)
        return self._resolve_credentials_from_profile(source_profile)
    def _resolve_credentials_from_profile(self, profile_name):
        """Resolve source credentials from another profile; may recurse
        into another assume-role step for chained roles.
        """
        profiles = self._loaded_config.get('profiles', {})
        profile = profiles[profile_name]
        if self._has_static_credentials(profile) and \
                not self._profile_provider_builder:
            # This is only here for backwards compatibility. If this provider
            # isn't given a profile provider builder we still want to be able
            # to handle the basic static credential case as we would before
            # the profile provider builder parameter was added.
            return self._resolve_static_credentials_from_profile(profile)
        elif self._has_static_credentials(profile) or \
                not self._has_assume_role_config_vars(profile):
            profile_providers = self._profile_provider_builder.providers(
                profile_name=profile_name,
                disable_env_vars=True,
            )
            profile_chain = CredentialResolver(profile_providers)
            credentials = profile_chain.load_credentials()
            if credentials is None:
                error_message = (
                    'The source profile "%s" must have credentials.'
                )
                raise InvalidConfigError(
                    error_msg=error_message % profile_name,
                )
            return credentials
        # The source profile is itself an assume-role profile: chain.
        return self._load_creds_via_assume_role(profile_name)
    def _resolve_static_credentials_from_profile(self, profile):
        """Build ``Credentials`` directly from static keys in *profile*."""
        try:
            return Credentials(
                access_key=profile['aws_access_key_id'],
                secret_key=profile['aws_secret_access_key'],
                token=profile.get('aws_session_token')
            )
        except KeyError as e:
            raise PartialCredentialsError(
                provider=self.METHOD, cred_var=str(e))
    def _resolve_credentials_from_source(self, credential_source,
                                         profile_name):
        """Resolve source credentials via the configured credential sourcer."""
        credentials = self._credential_sourcer.source_credentials(
            credential_source)
        if credentials is None:
            raise CredentialRetrievalError(
                provider=credential_source,
                error_msg=(
                    'No credentials found in credential_source referenced '
                    'in profile %s' % profile_name
                )
            )
        return credentials
class AssumeRoleWithWebIdentityProvider(CredentialProvider):
    """Assume a role using a web-identity token read from a file.

    Each setting is resolved from environment variables first (unless
    ``disable_env_vars`` is set) and then from the profile configuration.
    """
    METHOD = 'assume-role-with-web-identity'
    CANONICAL_NAME = None
    # Profile config key -> environment variable override.
    _CONFIG_TO_ENV_VAR = {
        'web_identity_token_file': 'AWS_WEB_IDENTITY_TOKEN_FILE',
        'role_session_name': 'AWS_ROLE_SESSION_NAME',
        'role_arn': 'AWS_ROLE_ARN',
    }

    def __init__(
        self,
        load_config,
        client_creator,
        profile_name,
        cache=None,
        disable_env_vars=False,
        token_loader_cls=None,
    ):
        """
        :param load_config: Zero-argument callable returning the full
            session configuration dictionary.
        :param client_creator: Factory with the same interface as
            ``botocore.session.Session.create_client``.
        :param profile_name: Name of the profile being resolved.
        :param cache: Optional mapping used to cache assumed credentials.
        :param disable_env_vars: When True, only the profile configuration
            is consulted; environment variables are ignored.
        :param token_loader_cls: Callable producing a token loader from a
            token file path; defaults to ``FileWebIdentityTokenLoader``.
        """
        self.cache = cache
        self._load_config = load_config
        self._client_creator = client_creator
        self._profile_name = profile_name
        # Lazily-populated snapshot of the profile's configuration.
        self._profile_config = None
        self._disable_env_vars = disable_env_vars
        if token_loader_cls is None:
            token_loader_cls = FileWebIdentityTokenLoader
        self._token_loader_cls = token_loader_cls

    def load(self):
        return self._assume_role_with_web_identity()

    def _get_profile_config(self, key):
        """Return *key* from the profile configuration (loaded once)."""
        if self._profile_config is None:
            loaded_config = self._load_config()
            profiles = loaded_config.get('profiles', {})
            self._profile_config = profiles.get(self._profile_name, {})
        return self._profile_config.get(key)

    def _get_env_config(self, key):
        """Return *key*'s environment override, or ``None``."""
        if self._disable_env_vars:
            return None
        env_key = self._CONFIG_TO_ENV_VAR.get(key)
        if env_key and env_key in os.environ:
            return os.environ[env_key]
        return None

    def _get_config(self, key):
        """Resolve *key*, preferring the environment over the profile."""
        env_value = self._get_env_config(key)
        if env_value is not None:
            return env_value
        return self._get_profile_config(key)

    def _assume_role_with_web_identity(self):
        token_path = self._get_config('web_identity_token_file')
        if not token_path:
            # Not configured for web identity; let other providers run.
            return None
        token_loader = self._token_loader_cls(token_path)
        role_arn = self._get_config('role_arn')
        if not role_arn:
            # Fix: the original message concatenated 'role_arn' and
            # 'configuration' without a space ("role_arnconfiguration").
            error_msg = (
                'The provided profile or the current environment is '
                'configured to assume role with web identity but has no '
                'role ARN configured. Ensure that the profile has the '
                'role_arn configuration set or the AWS_ROLE_ARN env var '
                'is set.'
            )
            raise InvalidConfigError(error_msg=error_msg)
        extra_args = {}
        role_session_name = self._get_config('role_session_name')
        if role_session_name is not None:
            extra_args['RoleSessionName'] = role_session_name
        fetcher = AssumeRoleWithWebIdentityCredentialFetcher(
            client_creator=self._client_creator,
            web_identity_token_loader=token_loader,
            role_arn=role_arn,
            extra_args=extra_args,
            cache=self.cache,
        )
        # The initial credentials are empty and the expiration time is set
        # to now so that we can delay the call to assume role until it is
        # strictly needed.
        return DeferredRefreshableCredentials(
            method=self.METHOD,
            refresh_using=fetcher.fetch_credentials,
        )
class CanonicalNameCredentialSourcer(object):
    """Looks up credential providers by their cross-SDK canonical names."""

    def __init__(self, providers):
        self._providers = providers

    def is_supported(self, source_name):
        """Validate a given source name.

        :type source_name: str
        :param source_name: The value of credential_source in the config
            file; the canonical name of a credential provider.
        :rtype: bool
        :returns: True if the credential provider is supported,
            False otherwise.
        """
        return any(
            provider.CANONICAL_NAME == source_name
            for provider in self._providers
        )

    def source_credentials(self, source_name):
        """Load source credentials for the given canonical name.

        :type source_name: str
        :param source_name: The value of credential_source in the config
            file; the canonical name of a credential provider.
        :rtype: Credentials
        """
        resolved = self._get_provider(source_name)
        if isinstance(resolved, CredentialResolver):
            return resolved.load_credentials()
        return resolved.load()

    def _get_provider(self, canonical_name):
        """Return a credential provider (or resolver) for *canonical_name*.

        :raises UnknownCredentialError: if no matching provider exists.
        """
        provider = self._get_provider_by_canonical_name(canonical_name)
        # The AssumeRole provider behaves as part of both the SharedConfig
        # and SharedCredentials providers, so requests for either of those
        # must also pull in the AssumeRole provider when it is present.
        if canonical_name.lower() in ('sharedconfig', 'sharedcredentials'):
            assume_role_provider = self._get_provider_by_method('assume-role')
            if assume_role_provider is not None:
                if provider is None:
                    # The shared provider may have been removed while the
                    # AssumeRole provider remains; use the latter alone.
                    return assume_role_provider
                # Both present: combine them so calling code can treat
                # the pair as a single entity.
                return CredentialResolver([assume_role_provider, provider])
        if provider is None:
            raise UnknownCredentialError(name=canonical_name)
        return provider

    def _get_provider_by_canonical_name(self, canonical_name):
        """Strict, case-insensitive canonical-name lookup (no aliasing)."""
        wanted = canonical_name.lower()
        for candidate in self._providers:
            name = candidate.CANONICAL_NAME
            # A provider without a canonical name never matches.
            if name and name.lower() == wanted:
                return candidate

    def _get_provider_by_method(self, method):
        """Return the credential provider whose METHOD equals *method*."""
        return next(
            (p for p in self._providers if p.METHOD == method),
            None,
        )
class ContainerProvider(CredentialProvider):
    """Loads credentials from the container (ECS) metadata endpoint.

    Only active when one of the opt-in environment variables below is
    present in the environment.
    """
    METHOD = 'container-role'
    CANONICAL_NAME = 'EcsContainer'
    ENV_VAR = 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI'
    ENV_VAR_FULL = 'AWS_CONTAINER_CREDENTIALS_FULL_URI'
    ENV_VAR_AUTH_TOKEN = 'AWS_CONTAINER_AUTHORIZATION_TOKEN'

    def __init__(self, environ=None, fetcher=None):
        """
        :param environ: Mapping of environment variables; defaults to
            ``os.environ`` (injectable for testing).
        :param fetcher: Object used to retrieve credentials over HTTP;
            defaults to a ``ContainerMetadataFetcher``.
        """
        if environ is None:
            environ = os.environ
        if fetcher is None:
            fetcher = ContainerMetadataFetcher()
        self._environ = environ
        self._fetcher = fetcher

    def load(self):
        # This cred provider is only triggered if the self.ENV_VAR is set,
        # which only happens if you opt into this feature.
        if self.ENV_VAR in self._environ or self.ENV_VAR_FULL in self._environ:
            return self._retrieve_or_fail()

    def _retrieve_or_fail(self):
        """Fetch credentials now and wrap them as refreshable credentials."""
        if self._provided_relative_uri():
            full_uri = self._fetcher.full_url(self._environ[self.ENV_VAR])
        else:
            full_uri = self._environ[self.ENV_VAR_FULL]
        headers = self._build_headers()
        fetcher = self._create_fetcher(full_uri, headers)
        creds = fetcher()
        return RefreshableCredentials(
            access_key=creds['access_key'],
            secret_key=creds['secret_key'],
            token=creds['token'],
            method=self.METHOD,
            expiry_time=_parse_if_needed(creds['expiry_time']),
            refresh_using=fetcher,
        )

    def _build_headers(self):
        """Return auth headers for the metadata request, or ``None``.

        Fix: removed a dead ``headers = {}`` local that was never used,
        and made the ``None`` return explicit (behavior is unchanged).
        """
        auth_token = self._environ.get(self.ENV_VAR_AUTH_TOKEN)
        if auth_token is not None:
            return {
                'Authorization': auth_token
            }
        return None

    def _create_fetcher(self, full_uri, headers):
        """Return a closure that fetches and normalizes credentials."""
        def fetch_creds():
            try:
                response = self._fetcher.retrieve_full_uri(
                    full_uri, headers=headers)
            except MetadataRetrievalError as e:
                logger.debug("Error retrieving container metadata: %s", e,
                             exc_info=True)
                raise CredentialRetrievalError(provider=self.METHOD,
                                               error_msg=str(e))
            # Normalize the response keys to the names used by
            # RefreshableCredentials.
            return {
                'access_key': response['AccessKeyId'],
                'secret_key': response['SecretAccessKey'],
                'token': response['Token'],
                'expiry_time': response['Expiration'],
            }
        return fetch_creds

    def _provided_relative_uri(self):
        return self.ENV_VAR in self._environ
class CredentialResolver(object):
    """Ordered chain of credential providers.

    Providers are consulted in order; the first one to return a
    non-None ``Credentials`` object wins.
    """

    def __init__(self, providers):
        """
        :param providers: A list of ``CredentialProvider`` instances.
        """
        self.providers = providers

    def insert_before(self, name, credential_provider):
        """Insert *credential_provider* ahead of the provider named *name*.

        :param name: METHOD name of an existing provider (e.g. ``env`` or
            ``config``); existing names and ordering can be discovered via
            ``self.providers``.
        :param credential_provider: The provider instance to insert.
        :raises UnknownCredentialError: if *name* is not in the chain.
        """
        self.providers.insert(
            self._get_provider_offset(name), credential_provider
        )

    def insert_after(self, name, credential_provider):
        """Insert *credential_provider* right after the provider named
        *name*.

        :param name: METHOD name of an existing provider.
        :param credential_provider: The provider instance to insert.
        :raises UnknownCredentialError: if *name* is not in the chain.
        """
        position = self._get_provider_offset(name) + 1
        self.providers.insert(position, credential_provider)

    def remove(self, name):
        """Remove the provider named *name*; unknown names are ignored."""
        methods = [provider.METHOD for provider in self.providers]
        try:
            self.providers.pop(methods.index(name))
        except ValueError:
            # Not present -- fail silently, matching historical behavior.
            pass

    def get_provider(self, name):
        """Return the provider whose METHOD equals *name*.

        :raises UnknownCredentialError: if no such provider exists.
        """
        return self.providers[self._get_provider_offset(name)]

    def _get_provider_offset(self, name):
        # Translate a METHOD name into its chain position, mapping the
        # ValueError from list.index onto the public error type.
        try:
            return [provider.METHOD for provider in self.providers].index(name)
        except ValueError:
            raise UnknownCredentialError(name=name)

    def load_credentials(self):
        """Walk the chain and return the first credentials found.

        Returns ``None`` when no provider yields credentials; this is kept
        for historical compatibility instead of raising.
        """
        for candidate in self.providers:
            logger.debug("Looking for credentials via: %s", candidate.METHOD)
            creds = candidate.load()
            if creds is not None:
                return creds
        return None
class SSOCredentialFetcher(CachedCredentialFetcher):
    """Fetches role credentials from AWS SSO, caching via the base class."""
    # Serialization format for expiration timestamps (UTC).
    _UTC_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
    def __init__(self, start_url, sso_region, role_name, account_id,
                 client_creator, token_loader=None, cache=None,
                 expiry_window_seconds=None):
        """
        :param start_url: The SSO start URL; also used as the key passed
            to *token_loader* to obtain the access token.
        :param sso_region: Region for the ``sso`` client.
        :param role_name: Name of the role to fetch credentials for.
        :param account_id: Account the role belongs to.
        :param client_creator: Factory used to create the ``sso`` client.
        :param token_loader: Callable mapping a start URL to an access
            token.
        :param cache: Mapping used by the base class to cache credentials.
        :param expiry_window_seconds: Refresh window forwarded to the base
            class.
        """
        self._client_creator = client_creator
        self._sso_region = sso_region
        self._role_name = role_name
        self._account_id = account_id
        self._start_url = start_url
        self._token_loader = token_loader
        super(SSOCredentialFetcher, self).__init__(
            cache, expiry_window_seconds
        )
    def _create_cache_key(self):
        """Create a predictable cache key for the current configuration.
        The cache key is intended to be compatible with file names.
        """
        args = {
            'startUrl': self._start_url,
            'roleName': self._role_name,
            'accountId': self._account_id,
        }
        # NOTE: It would be good to hoist this cache key construction logic
        # into the CachedCredentialFetcher class as we should be consistent.
        # Unfortunately, the current assume role fetchers that sub class don't
        # pass separators resulting in non-minified JSON. In the long term,
        # all fetchers should use the below caching scheme.
        args = json.dumps(args, sort_keys=True, separators=(',', ':'))
        argument_hash = sha1(args.encode('utf-8')).hexdigest()
        return self._make_file_safe(argument_hash)
    def _parse_timestamp(self, timestamp_ms):
        """Convert a millisecond epoch timestamp to a UTC string."""
        # fromtimestamp expects seconds so: milliseconds / 1000 = seconds
        timestamp_seconds = timestamp_ms / 1000.0
        timestamp = datetime.datetime.fromtimestamp(timestamp_seconds, tzutc())
        return timestamp.strftime(self._UTC_DATE_FORMAT)
    def _get_credentials(self):
        """Get credentials by calling SSO get role credentials."""
        # The call is authenticated with the SSO access token passed in
        # the request, not SigV4 -- hence the UNSIGNED signature version.
        config = Config(
            signature_version=UNSIGNED,
            region_name=self._sso_region,
        )
        client = self._client_creator('sso', config=config)
        kwargs = {
            'roleName': self._role_name,
            'accountId': self._account_id,
            'accessToken': self._token_loader(self._start_url),
        }
        try:
            response = client.get_role_credentials(**kwargs)
        except client.exceptions.UnauthorizedException:
            raise UnauthorizedSSOTokenError()
        credentials = response['roleCredentials']
        # Normalize into the shape the caching base class expects.
        credentials = {
            'ProviderType': 'sso',
            'Credentials': {
                'AccessKeyId': credentials['accessKeyId'],
                'SecretAccessKey': credentials['secretAccessKey'],
                'SessionToken': credentials['sessionToken'],
                'Expiration': self._parse_timestamp(credentials['expiration']),
            }
        }
        return credentials
class SSOProvider(CredentialProvider):
    """Credential provider for profiles configured to use AWS SSO."""
    METHOD = 'sso'
    # Default on-disk location for cached SSO access tokens.
    _SSO_TOKEN_CACHE_DIR = os.path.expanduser(
        os.path.join('~', '.aws', 'sso', 'cache')
    )
    # All of these profile keys are required for an SSO profile.
    _SSO_CONFIG_VARS = [
        'sso_start_url',
        'sso_region',
        'sso_role_name',
        'sso_account_id',
    ]
    def __init__(self, load_config, client_creator, profile_name,
                 cache=None, token_cache=None):
        # token_cache holds SSO access tokens; cache holds role credentials.
        if token_cache is None:
            token_cache = JSONFileCache(self._SSO_TOKEN_CACHE_DIR)
        self._token_cache = token_cache
        if cache is None:
            cache = {}
        self.cache = cache
        self._load_config = load_config
        self._client_creator = client_creator
        self._profile_name = profile_name
    def _load_sso_config(self):
        """Return the profile's SSO settings, or ``None`` if the profile
        has no SSO configuration at all.
        :raises InvalidConfigError: if only some of the required SSO
            settings are present.
        """
        loaded_config = self._load_config()
        profiles = loaded_config.get('profiles', {})
        profile_name = self._profile_name
        profile_config = profiles.get(self._profile_name, {})
        if all(c not in profile_config for c in self._SSO_CONFIG_VARS):
            # No SSO configuration whatsoever; this provider doesn't apply.
            return None
        config = {}
        missing_config_vars = []
        for config_var in self._SSO_CONFIG_VARS:
            if config_var in profile_config:
                config[config_var] = profile_config[config_var]
            else:
                missing_config_vars.append(config_var)
        if missing_config_vars:
            missing = ', '.join(missing_config_vars)
            raise InvalidConfigError(
                error_msg=(
                    'The profile "%s" is configured to use SSO but is missing '
                    'required configuration: %s' % (profile_name, missing)
                )
            )
        return config
    def load(self):
        """Load SSO credentials, or ``None`` if the profile isn't SSO."""
        sso_config = self._load_sso_config()
        if not sso_config:
            return None
        sso_fetcher = SSOCredentialFetcher(
            sso_config['sso_start_url'],
            sso_config['sso_region'],
            sso_config['sso_role_name'],
            sso_config['sso_account_id'],
            self._client_creator,
            token_loader=SSOTokenLoader(cache=self._token_cache),
            cache=self.cache,
        )
        # Defer the actual SSO call until credentials are first needed.
        return DeferredRefreshableCredentials(
            method=self.METHOD,
            refresh_using=sso_fetcher.fetch_credentials,
        )
| apache-2.0 |
rquellh/thewanderingconsultant | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/pygments/formatters/_mapping.py | 263 | 5508 | # -*- coding: utf-8 -*-
"""
pygments.formatters._mapping
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter mapping defintions. This file is generated by itself. Everytime
you change something on a builtin formatter defintion, run this script from
the formatters folder to update it.
Do not alter the FORMATTERS dictionary by hand.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# start
from pygments.formatters.bbcode import BBCodeFormatter
from pygments.formatters.html import HtmlFormatter
from pygments.formatters.img import BmpImageFormatter
from pygments.formatters.img import GifImageFormatter
from pygments.formatters.img import ImageFormatter
from pygments.formatters.img import JpgImageFormatter
from pygments.formatters.latex import LatexFormatter
from pygments.formatters.other import NullFormatter
from pygments.formatters.other import RawTokenFormatter
from pygments.formatters.rtf import RtfFormatter
from pygments.formatters.svg import SvgFormatter
from pygments.formatters.terminal import TerminalFormatter
from pygments.formatters.terminal256 import Terminal256Formatter
# Maps formatter class -> (name, aliases, filename patterns, one-line doc).
# This mapping is machine-generated (see the __main__ block below); do not
# edit the entries by hand.
FORMATTERS = {
    BBCodeFormatter: ('BBCode', ('bbcode', 'bb'), (), 'Format tokens with BBcodes. These formatting codes are used by many bulletin boards, so you can highlight your sourcecode with pygments before posting it there.'),
    BmpImageFormatter: ('img_bmp', ('bmp', 'bitmap'), ('*.bmp',), 'Create a bitmap image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
    GifImageFormatter: ('img_gif', ('gif',), ('*.gif',), 'Create a GIF image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
    HtmlFormatter: ('HTML', ('html',), ('*.html', '*.htm'), "Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass` option."),
    ImageFormatter: ('img', ('img', 'IMG', 'png'), ('*.png',), 'Create a PNG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
    JpgImageFormatter: ('img_jpg', ('jpg', 'jpeg'), ('*.jpg',), 'Create a JPEG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
    LatexFormatter: ('LaTeX', ('latex', 'tex'), ('*.tex',), 'Format tokens as LaTeX code. This needs the `fancyvrb` and `color` standard packages.'),
    NullFormatter: ('Text only', ('text', 'null'), ('*.txt',), 'Output the text unchanged without any formatting.'),
    RawTokenFormatter: ('Raw tokens', ('raw', 'tokens'), ('*.raw',), 'Format tokens as a raw representation for storing token streams.'),
    RtfFormatter: ('RTF', ('rtf',), ('*.rtf',), 'Format tokens as RTF markup. This formatter automatically outputs full RTF documents with color information and other useful stuff. Perfect for Copy and Paste into Microsoft\xc2\xae Word\xc2\xae documents.'),
    SvgFormatter: ('SVG', ('svg',), ('*.svg',), 'Format tokens as an SVG graphics file. This formatter is still experimental. Each line of code is a ``<text>`` element with explicit ``x`` and ``y`` coordinates containing ``<tspan>`` elements with the individual token styles.'),
    Terminal256Formatter: ('Terminal256', ('terminal256', 'console256', '256'), (), 'Format tokens with ANSI color sequences, for output in a 256-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'),
    TerminalFormatter: ('Terminal', ('terminal', 'console'), (), 'Format tokens with ANSI color sequences, for output in a text console. Color sequences are terminated at newlines, so that paging the output works correctly.')
}
# NOTE: This maintenance script uses Python 2 syntax (the bare `print`
# statement below); it must be run with a Python 2 interpreter.  It scans
# the sibling formatter modules and rewrites THIS file in place, replacing
# the section between the '# start' marker and the __main__ guard with
# regenerated imports and a regenerated FORMATTERS mapping.
if __name__ == '__main__':
    import sys
    import os
    # lookup formatters
    found_formatters = []
    imports = []
    # Ensure the package root is importable when run from the formatters dir.
    sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
    from pygments.util import docstring_headline
    for filename in os.listdir('.'):
        if filename.endswith('.py') and not filename.startswith('_'):
            module_name = 'pygments.formatters.%s' % filename[:-3]
            print module_name
            module = __import__(module_name, None, None, [''])
            # Each formatter module declares its public classes in __all__.
            for formatter_name in module.__all__:
                imports.append((module_name, formatter_name))
                formatter = getattr(module, formatter_name)
                found_formatters.append(
                    '%s: %r' % (formatter_name,
                                (formatter.name,
                                 tuple(formatter.aliases),
                                 tuple(formatter.filenames),
                                 docstring_headline(formatter))))
    # sort them, that should make the diff files for svn smaller
    found_formatters.sort()
    imports.sort()
    # extract useful sourcecode from this file
    f = open(__file__)
    try:
        content = f.read()
    finally:
        f.close()
    header = content[:content.find('# start')]
    footer = content[content.find("if __name__ == '__main__':"):]
    # write new file
    f = open(__file__, 'w')
    f.write(header)
    f.write('# start\n')
    f.write('\n'.join(['from %s import %s' % imp for imp in imports]))
    f.write('\n\n')
    f.write('FORMATTERS = {\n    %s\n}\n\n' % ',\n    '.join(found_formatters))
    f.write(footer)
    f.close()
| mit |
jacobq/csci5221-viro-project | tests/unit/openflow/nicira_test.py | 45 | 7282 | # Copyright 2011-2013 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sys
import os.path
sys.path.append(os.path.dirname(__file__) + "/../../..")
import pox.openflow.nicira as nx
from pox.lib.addresses import EthAddr, IPAddr
import pox.openflow.libopenflow_01 as of
class basics_test (unittest.TestCase):
"""
Do some tests on the Nicira extensions
This isn't totally comprehensive (that is, we don't currently try every
combination of masked/unmasked, etc. But it should serve as a basic
sanity test.
"""
longMessage = True
# Add an _init_action_XXXX method to override action creation
# (otherwise they're initialized with no arguments).
  def _init_action_nx_reg_move (self, cls):
    # Build a reg_move action: 3 bits from REG2 (source offset 2) to REG1.
    return cls(dst=nx.NXM_NX_REG1,src=nx.NXM_NX_REG2,nbits=3,src_ofs=2)
  def _init_action_nx_reg_load (self, cls):
    # Build a reg_load action: load the 16-bit value 42 into REG3.
    return cls(dst=nx.NXM_NX_REG3,value=42,nbits=16)
  def _init_action_nx_output_reg (self, cls):
    # Build an output_reg action reading 16 bits of NXM_NX_TUN_ID.
    return cls(reg=nx.NXM_NX_TUN_ID,nbits=16)
  def _init_action_nx_action_resubmit (self, cls):
    # Use a factory method instead of the plain constructor.
    return cls.resubmit_table()
  def _init_action_nx_action_set_tunnel (self, cls):
    # Constructor requires a tunnel ID; any value works for pack/unpack.
    return cls(tun_id=101)
  def _init_action_nx_action_set_tunnel64 (self, cls):
    # Same as set_tunnel, but for the 64-bit tunnel ID variant.
    return cls(tun_id=101)
  def _init_action_nx_action_learn (self, cls):
    # Built via the shared helper; sanity-check it returned this class.
    learn = self._make_learn_action()
    assert type(learn)==cls
    return learn
  def _init_action_nx_action_pop_mpls (self, cls):
    # Constructor requires an ethertype; any value works for pack/unpack.
    return cls(ethertype=101)
  def test_unpack_weird_header (self):
    """
    Test the unpacking of a header we don't have a class for
    """
    # Make a weird header (an NXM type/length pair nothing registers)...
    class nxm_weird (nx._nxm_maskable, nx._nxm_numeric_entry):
      _nxm_type = nx._make_type(0xdead,0x42)
      _nxm_length = 4
    original = nx.nx_reg_load(dst=nxm_weird,value=42,nbits=32)
    original_packed = original.pack()
    # Currently, the action unpacking API still sucks...
    unoriginal = nx.nx_reg_load()
    offset = unoriginal.unpack(original_packed, 0)
    self.assertEqual(offset, len(original_packed),
                     "Didn't unpack entire entry")
    unoriginal_packed = unoriginal.pack()
    # Unpacking an unknown NXM type should synthesize a placeholder class
    # named after the vendor/type pair.
    self.assertEqual(unoriginal.dst.__name__, "NXM_UNKNOWN_dead_42",
                     "Didn't generate new class correctly?")
    self.assertEqual(original_packed, unoriginal_packed, "Pack/Unpack failed")
def test_action_pack_unpack (self):
"""
Pack and unpack a bunch of actions
"""
for name in dir(nx):
a = getattr(nx, name)
if not nx._issubclass(a, of.ofp_action_vendor_base): continue
print "Trying",name,"...",
init = getattr(self, "_init_action_" + name, lambda c: c())
original = init(a)
original_packed = original.pack()
#print len(original_packed)
# Currently, the action unpacking API still sucks...
unoriginal = a()
offset = unoriginal.unpack(original_packed, 0)
self.assertEqual(offset, len(original_packed),
"Didn't unpack entire entry " + name)
unoriginal_packed = unoriginal.pack()
self.assertEqual(original, unoriginal,
"Pack/Unpack failed for " + name)
print "Success!"
def test_nxm_ip (self):
"""
Test the check for nonzero bits of a masked entry
"""
def try_bad ():
e = nx.NXM_OF_IP_SRC(IPAddr("192.168.56.1"),IPAddr("255.255.255.0"))
e.pack()
self.assertRaisesRegexp(AssertionError, '^nonzero masked bits$',
try_bad)
def _make_learn_action (self):
fms = nx.flow_mod_spec.new
learn = nx.nx_action_learn(table_id=1,hard_timeout=10)
learn.spec.append(fms( field=nx.NXM_OF_VLAN_TCI, n_bits=12 ))
learn.spec.append(fms( field=nx.NXM_OF_ETH_SRC, match=nx.NXM_OF_ETH_DST ))
learn.spec.append(fms( field=nx.NXM_OF_IN_PORT, output=True ))
#learn.spec = [
# nx.flow_mod_spec(src=nx.nx_learn_src_field(nx.NXM_OF_VLAN_TCI),
# n_bits=12),
# nx.flow_mod_spec(src=nx.nx_learn_src_field(nx.NXM_OF_ETH_SRC),
# dst=nx.nx_learn_dst_match(nx.NXM_OF_ETH_DST)),
# nx.flow_mod_spec(src=nx.nx_learn_src_field(nx.NXM_OF_IN_PORT),
# dst=nx.nx_learn_dst_output())
#]
#learn.spec.chain(
# field=nx.NXM_OF_VLAN_TCI, n_bits=12).chain(
# field=nx.NXM_OF_ETH_SRC, match=nx.NXM_OF_ETH_DST).chain(
# field=nx.NXM_OF_IN_PORT, output=True)
return learn
def test_flow_mod_spec (self):
"""
Check flow_mod_specs are correct
Not comprehensive.
"""
learn = self._make_learn_action()
good = """00 0c 00 00 08 02 00 00 00 00 08 02 00 00
00 30 00 00 04 06 00 00 00 00 02 06 00 00
10 10 00 00 00 02 00 00""".split()
good = ''.join([chr(int(x,16)) for x in good])
self.assertEqual(good, ''.join(x.pack() for x in learn.spec))
def test_match_pack_unpack (self):
"""
Pack and unpack a bunch of match entries
"""
# Note that this does not currently really take into account constraints
# on masks (e.g., EthAddr masks only having broadcast bit).
for nxm_name,nxm_type in nx._nxm_name_to_type.items():
nxm_class = nx._nxm_type_to_class[nxm_type]
mask = None
#print nxm_name
# If more exotic nxm types are added (e.g., with different types for
# values and masks), we'll need to add additional if statements here...
if issubclass(nxm_class, nx._nxm_numeric_entry):
value = 0x0a
mask = 0x0f
elif issubclass(nxm_class, nx._nxm_raw):
value = 'aabb'
# Currently never check mask for raw
elif issubclass(nxm_class, nx._nxm_ipv6):
import pox.lib.addresses as addresses
#self.assertFalse('IPAddr6' in dir(addresses), 'IPv6 is available, '
# 'so this test needs to be fixed.')
value = 'ff02::/126'
elif issubclass(nxm_class, nx._nxm_ip):
value = IPAddr('192.168.56.0')
mask = IPAddr('255.255.255.0')
elif issubclass(nxm_class, nx._nxm_ether):
value = EthAddr('01:02:03:04:05:06')
# Currently never check mask for ether
else:
self.fail("Unsupported NXM type for " + nxm_name)
if not issubclass(nxm_class, nx._nxm_maskable):
mask = None
original = nxm_class(value, mask)
original_packed = original.pack()
offset,unoriginal = nx.nxm_entry.unpack_new(original_packed, 0)
self.assertEqual(offset, len(original_packed),
"Didn't unpack entire entry " + nxm_name)
unoriginal_packed = unoriginal.pack()
self.assertEqual(original, unoriginal,
"Pack/Unpack failed for " + nxm_name)
# Allow running this test file directly; test runners discover the
# TestCase class on their own.
if __name__ == '__main__':
  unittest.main()
| apache-2.0 |
utamaro/youtube-dl | youtube_dl/extractor/comedycentral.py | 92 | 12008 | from __future__ import unicode_literals
import re
from .mtv import MTVServicesInfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse,
)
from ..utils import (
ExtractorError,
float_or_none,
unified_strdate,
)
class ComedyCentralIE(MTVServicesInfoExtractor):
    """Extractor for cc.com video pages (clips, episodes, collections).

    All download/extraction logic is inherited from
    MTVServicesInfoExtractor; this subclass only defines the recognized
    URL pattern and the MRSS feed endpoint.
    """
    _VALID_URL = r'''(?x)https?://(?:www\.)?cc\.com/
        (video-clips|episodes|cc-studios|video-collections|full-episodes)
        /(?P<title>.*)'''
    # MRSS feed queried by the base class for the actual media data.
    _FEED_URL = 'http://comedycentral.com/feeds/mrss/'

    _TEST = {
        'url': 'http://www.cc.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother',
        'md5': 'c4f48e9eda1b16dd10add0744344b6d8',
        'info_dict': {
            'id': 'cef0cbb3-e776-4bc9-b62e-8016deccb354',
            'ext': 'mp4',
            'title': 'CC:Stand-Up|Greg Fitzsimmons: Life on Stage|Uncensored - Too Good of a Mother',
            'description': 'After a certain point, breastfeeding becomes c**kblocking.',
        },
    }
class ComedyCentralShowsIE(MTVServicesInfoExtractor):
    """Extractor for The Daily Show / The Colbert Report pages.

    Handles show shortcuts (":tds"), full-episode pages, clips, and
    extended-interview playlists.  Each episode is returned as a
    playlist whose entries are the episode's video segments.
    """
    IE_DESC = 'The Daily Show / The Colbert Report'
    # urls can be abbreviations like :thedailyshow
    # urls for episodes like:
    # or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day
    # or: http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news
    # or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524
    _VALID_URL = r'''(?x)^(:(?P<shortname>tds|thedailyshow)
                      |https?://(:www\.)?
                          (?P<showname>thedailyshow|thecolbertreport)\.(?:cc\.)?com/
                         ((?:full-)?episodes/(?:[0-9a-z]{6}/)?(?P<episode>.*)|
                          (?P<clip>
                              (?:(?:guests/[^/]+|videos|video-playlists|special-editions|news-team/[^/]+)/[^/]+/(?P<videotitle>[^/?#]+))
                              |(the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
                              |(watch/(?P<date>[^/]*)/(?P<tdstitle>.*))
                          )|
                          (?P<interview>
                              extended-interviews/(?P<interID>[0-9a-z]+)/
                              (?:playlist_tds_extended_)?(?P<interview_title>[^/?#]*?)
                              (?:/[^/?#]?|[?#]|$))))
                    '''
    _TESTS = [{
        'url': 'http://thedailyshow.cc.com/watch/thu-december-13-2012/kristen-stewart',
        'md5': '4e2f5cb088a83cd8cdb7756132f9739d',
        'info_dict': {
            'id': 'ab9ab3e7-5a98-4dbe-8b21-551dc0523d55',
            'ext': 'mp4',
            'upload_date': '20121213',
            'description': 'Kristen Stewart learns to let loose in "On the Road."',
            'uploader': 'thedailyshow',
            'title': 'thedailyshow kristen-stewart part 1',
        }
    }, {
        'url': 'http://thedailyshow.cc.com/extended-interviews/b6364d/sarah-chayes-extended-interview',
        'info_dict': {
            'id': 'sarah-chayes-extended-interview',
            'description': 'Carnegie Endowment Senior Associate Sarah Chayes discusses how corrupt institutions function throughout the world in her book "Thieves of State: Why Corruption Threatens Global Security."',
            'title': 'thedailyshow Sarah Chayes Extended Interview',
        },
        'playlist': [
            {
                'info_dict': {
                    'id': '0baad492-cbec-4ec1-9e50-ad91c291127f',
                    'ext': 'mp4',
                    'upload_date': '20150129',
                    'description': 'Carnegie Endowment Senior Associate Sarah Chayes discusses how corrupt institutions function throughout the world in her book "Thieves of State: Why Corruption Threatens Global Security."',
                    'uploader': 'thedailyshow',
                    'title': 'thedailyshow sarah-chayes-extended-interview part 1',
                },
            },
            {
                'info_dict': {
                    'id': '1e4fb91b-8ce7-4277-bd7c-98c9f1bbd283',
                    'ext': 'mp4',
                    'upload_date': '20150129',
                    'description': 'Carnegie Endowment Senior Associate Sarah Chayes discusses how corrupt institutions function throughout the world in her book "Thieves of State: Why Corruption Threatens Global Security."',
                    'uploader': 'thedailyshow',
                    'title': 'thedailyshow sarah-chayes-extended-interview part 2',
                },
            },
        ],
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://thedailyshow.cc.com/extended-interviews/xm3fnq/andrew-napolitano-extended-interview',
        'only_matching': True,
    }, {
        'url': 'http://thecolbertreport.cc.com/videos/29w6fx/-realhumanpraise-for-fox-news',
        'only_matching': True,
    }, {
        'url': 'http://thecolbertreport.cc.com/videos/gh6urb/neil-degrasse-tyson-pt--1?xrs=eml_col_031114',
        'only_matching': True,
    }, {
        'url': 'http://thedailyshow.cc.com/guests/michael-lewis/3efna8/exclusive---michael-lewis-extended-interview-pt--3',
        'only_matching': True,
    }, {
        'url': 'http://thedailyshow.cc.com/episodes/sy7yv0/april-8--2014---denis-leary',
        'only_matching': True,
    }, {
        'url': 'http://thecolbertreport.cc.com/episodes/8ase07/april-8--2014---jane-goodall',
        'only_matching': True,
    }, {
        'url': 'http://thedailyshow.cc.com/video-playlists/npde3s/the-daily-show-19088-highlights',
        'only_matching': True,
    }, {
        'url': 'http://thedailyshow.cc.com/video-playlists/t6d9sg/the-daily-show-20038-highlights/be3cwo',
        'only_matching': True,
    }, {
        'url': 'http://thedailyshow.cc.com/special-editions/2l8fdb/special-edition---a-look-back-at-food',
        'only_matching': True,
    }, {
        'url': 'http://thedailyshow.cc.com/news-team/michael-che/7wnfel/we-need-to-talk-about-israel',
        'only_matching': True,
    }]

    # Bitrate identifiers offered by the mediagen service, highest first.
    _available_formats = ['3500', '2200', '1700', '1200', '750', '400']

    # Container extension for each bitrate identifier.
    _video_extensions = {
        '3500': 'mp4',
        '2200': 'mp4',
        '1700': 'mp4',
        '1200': 'mp4',
        '750': 'mp4',
        '400': 'mp4',
    }
    # (width, height) for each bitrate identifier.
    _video_dimensions = {
        '3500': (1280, 720),
        '2200': (960, 540),
        '1700': (768, 432),
        '1200': (640, 360),
        '750': (512, 288),
        '400': (384, 216),
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        if mobj.group('shortname'):
            # Expand a ":tds"-style shortcut to the show's full-episodes
            # page and re-match so the named groups below are populated.
            if mobj.group('shortname') in ('tds', 'thedailyshow'):
                url = 'http://thedailyshow.cc.com/full-episodes/'
            else:
                url = 'http://thecolbertreport.cc.com/full-episodes/'
            mobj = re.match(self._VALID_URL, url, re.VERBOSE)
            assert mobj is not None

        if mobj.group('clip'):
            # Clip URLs carry the title in one of three named groups,
            # depending on which alternative of _VALID_URL matched.
            if mobj.group('videotitle'):
                epTitle = mobj.group('videotitle')
            elif mobj.group('showname') == 'thedailyshow':
                epTitle = mobj.group('tdstitle')
            else:
                epTitle = mobj.group('cntitle')
            dlNewest = False
        elif mobj.group('interview'):
            epTitle = mobj.group('interview_title')
            dlNewest = False
        else:
            # Episode URL.  Without an explicit episode id we follow the
            # redirect to the newest episode below.
            dlNewest = not mobj.group('episode')
            if dlNewest:
                epTitle = mobj.group('showname')
            else:
                epTitle = mobj.group('episode')
        show_name = mobj.group('showname')

        webpage, htmlHandle = self._download_webpage_handle(url, epTitle)
        if dlNewest:
            # The full-episodes page redirects to the latest episode;
            # re-parse the final URL to learn which episode that is.
            url = htmlHandle.geturl()
            mobj = re.match(self._VALID_URL, url, re.VERBOSE)
            if mobj is None:
                raise ExtractorError('Invalid redirected URL: ' + url)
            if mobj.group('episode') == '':
                raise ExtractorError('Redirected URL is still not specific: ' + url)
            epTitle = (mobj.group('episode') or mobj.group('videotitle')).rpartition('/')[-1]

        # Locate the mtvnservices media URI embedded in the page markup.
        mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', webpage)
        if len(mMovieParams) == 0:
            # The Colbert Report embeds the reference in a data-mgid
            # attribute without a URL prefix; so extract the alternate
            # reference and then add the URL prefix manually.
            altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video|playlist).*?:.*?)"', webpage)
            if len(altMovieParams) == 0:
                raise ExtractorError('unable to find Flash URL in webpage ' + url)
            else:
                mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]

        uri = mMovieParams[0][1]
        # Correct cc.com in uri
        uri = re.sub(r'(episode:[^.]+)(\.cc)?\.com', r'\1.com', uri)

        index_url = 'http://%s.cc.com/feeds/mrss?%s' % (show_name, compat_urllib_parse.urlencode({'uri': uri}))
        idoc = self._download_xml(
            index_url, epTitle,
            'Downloading show index', 'Unable to download episode index')

        title = idoc.find('./channel/title').text
        description = idoc.find('./channel/description').text

        entries = []
        item_els = idoc.findall('.//item')
        # Each <item> in the MRSS feed is one segment of the episode.
        for part_num, itemEl in enumerate(item_els):
            upload_date = unified_strdate(itemEl.findall('./pubDate')[0].text)
            thumbnail = itemEl.find('.//{http://search.yahoo.com/mrss/}thumbnail').attrib.get('url')

            content = itemEl.find('.//{http://search.yahoo.com/mrss/}content')
            duration = float_or_none(content.attrib.get('duration'))
            mediagen_url = content.attrib['url']
            guid = itemEl.find('./guid').text.rpartition(':')[-1]

            cdoc = self._download_xml(
                mediagen_url, epTitle,
                'Downloading configuration for segment %d / %d' % (part_num + 1, len(item_els)))

            # Collect (bitrate, stream URL) pairs for this segment.
            turls = []
            for rendition in cdoc.findall('.//rendition'):
                finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text)
                turls.append(finfo)

            formats = []
            # Offer both an HTTP and an RTMP variant per bitrate.
            for format, rtmp_video_url in turls:
                w, h = self._video_dimensions.get(format, (None, None))
                formats.append({
                    'format_id': 'vhttp-%s' % format,
                    'url': self._transform_rtmp_url(rtmp_video_url),
                    'ext': self._video_extensions.get(format, 'mp4'),
                    'height': h,
                    'width': w,
                })
                formats.append({
                    'format_id': 'rtmp-%s' % format,
                    'url': rtmp_video_url.replace('viacomccstrm', 'viacommtvstrm'),
                    'ext': self._video_extensions.get(format, 'mp4'),
                    'height': h,
                    'width': w,
                })
            self._sort_formats(formats)

            subtitles = self._extract_subtitles(cdoc, guid)

            virtual_id = show_name + ' ' + epTitle + ' part ' + compat_str(part_num + 1)
            entries.append({
                'id': guid,
                'title': virtual_id,
                'formats': formats,
                'uploader': show_name,
                'upload_date': upload_date,
                'duration': duration,
                'thumbnail': thumbnail,
                'description': description,
                'subtitles': subtitles,
            })

        return {
            '_type': 'playlist',
            'id': epTitle,
            'entries': entries,
            'title': show_name + ' ' + title,
            'description': description,
        }
| unlicense |
Matthie456/Bon_DenDuijn | SpatialDecision/external/networkx/classes/multidigraph.py | 20 | 33429 | """Base class for MultiDiGraph."""
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from copy import deepcopy
import networkx as nx
from networkx.classes.graph import Graph # for doctests
from networkx.classes.digraph import DiGraph
from networkx.classes.multigraph import MultiGraph
from networkx.exception import NetworkXError
__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult(dschult@colgate.edu)'])
class MultiDiGraph(MultiGraph,DiGraph):
    """A directed graph class that can store multiedges.

    Multiedges are multiple edges between two nodes.  Each edge
    can hold optional data or attributes.

    A MultiDiGraph holds directed edges.  Self loops are allowed.

    Nodes can be arbitrary (hashable) Python objects with optional
    key/value attributes.

    Edges are represented as links between nodes with optional
    key/value attributes.

    Parameters
    ----------
    data : input graph
        Data to initialize graph. If data=None (default) an empty
        graph is created. The data can be an edge list, or any
        NetworkX graph object. If the corresponding optional Python
        packages are installed the data can also be a NumPy matrix
        or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph.
    attr : keyword arguments, optional (default= no attributes)
        Attributes to add to graph as key=value pairs.

    See Also
    --------
    Graph
    DiGraph
    MultiGraph

    Examples
    --------
    Create an empty graph structure (a "null graph") with no nodes and
    no edges.

    >>> G = nx.MultiDiGraph()

    G can be grown in several ways.

    **Nodes:**

    Add one node at a time:

    >>> G.add_node(1)

    Add the nodes from any container (a list, dict, set or
    even the lines from a file or the nodes from another graph).

    >>> G.add_nodes_from([2,3])
    >>> G.add_nodes_from(range(100,110))
    >>> H=nx.Graph()
    >>> H.add_path([0,1,2,3,4,5,6,7,8,9])
    >>> G.add_nodes_from(H)

    In addition to strings and integers any hashable Python object
    (except None) can represent a node, e.g. a customized node object,
    or even another Graph.

    >>> G.add_node(H)

    **Edges:**

    G can also be grown by adding edges.

    Add one edge,

    >>> G.add_edge(1, 2)

    a list of edges,

    >>> G.add_edges_from([(1,2),(1,3)])

    or a collection of edges,

    >>> G.add_edges_from(H.edges())

    If some edges connect nodes not yet in the graph, the nodes
    are added automatically.  If an edge already exists, an additional
    edge is created and stored using a key to identify the edge.
    By default the key is the lowest unused integer.

    >>> G.add_edges_from([(4,5,dict(route=282)), (4,5,dict(route=37))])
    >>> G[4]
    {5: {0: {}, 1: {'route': 282}, 2: {'route': 37}}}

    **Attributes:**

    Each graph, node, and edge can hold key/value attribute pairs
    in an associated attribute dictionary (the keys must be hashable).
    By default these are empty, but can be added or changed using
    add_edge, add_node or direct manipulation of the attribute
    dictionaries named graph, node and edge respectively.

    >>> G = nx.MultiDiGraph(day="Friday")
    >>> G.graph
    {'day': 'Friday'}

    Add node attributes using add_node(), add_nodes_from() or G.node

    >>> G.add_node(1, time='5pm')
    >>> G.add_nodes_from([3], time='2pm')
    >>> G.node[1]
    {'time': '5pm'}
    >>> G.node[1]['room'] = 714
    >>> del G.node[1]['room'] # remove attribute
    >>> G.nodes(data=True)
    [(1, {'time': '5pm'}), (3, {'time': '2pm'})]

    Warning: adding a node to G.node does not add it to the graph.

    Add edge attributes using add_edge(), add_edges_from(), subscript
    notation, or G.edge.

    >>> G.add_edge(1, 2, weight=4.7 )
    >>> G.add_edges_from([(3,4),(4,5)], color='red')
    >>> G.add_edges_from([(1,2,{'color':'blue'}), (2,3,{'weight':8})])
    >>> G[1][2][0]['weight'] = 4.7
    >>> G.edge[1][2][0]['weight'] = 4

    **Shortcuts:**

    Many common graph features allow python syntax to speed reporting.

    >>> 1 in G     # check if node in graph
    True
    >>> [n for n in G if n<3]   # iterate through nodes
    [1, 2]
    >>> len(G)  # number of nodes in graph
    5
    >>> G[1] # adjacency dict keyed by neighbor to edge attributes
    ...            # Note: you should not change this dict manually!
    {2: {0: {'weight': 4}, 1: {'color': 'blue'}}}

    The fastest way to traverse all edges of a graph is via
    adjacency_iter(), but the edges() method is often more convenient.

    >>> for n,nbrsdict in G.adjacency_iter():
    ...     for nbr,keydict in nbrsdict.items():
    ...        for key,eattr in keydict.items():
    ...            if 'weight' in eattr:
    ...                (n,nbr,eattr['weight'])
    (1, 2, 4)
    (2, 3, 8)
    >>> G.edges(data='weight')
    [(1, 2, 4), (1, 2, None), (2, 3, 8), (3, 4, None), (4, 5, None)]

    **Reporting:**

    Simple graph information is obtained using methods.
    Iterator versions of many reporting methods exist for efficiency.
    Methods exist for reporting nodes(), edges(), neighbors() and degree()
    as well as the number of nodes and edges.

    For details on these and other miscellaneous methods, see below.

    **Subclasses (Advanced):**

    The MultiDiGraph class uses a dict-of-dict-of-dict-of-dict structure.
    The outer dict (node_dict) holds adjacency lists keyed by node.
    The next dict (adjlist) represents the adjacency list and holds
    edge_key dicts keyed by neighbor. The edge_key dict holds each edge_attr
    dict keyed by edge key. The inner dict (edge_attr) represents
    the edge data and holds edge attribute values keyed by attribute names.

    Each of these four dicts in the dict-of-dict-of-dict-of-dict
    structure can be replaced by a user defined dict-like object.
    In general, the dict-like features should be maintained but
    extra features can be added. To replace one of the dicts create
    a new graph class by changing the class(!) variable holding the
    factory for that dict-like structure. The variable names
    are node_dict_factory, adjlist_dict_factory, edge_key_dict_factory
    and edge_attr_dict_factory.

    node_dict_factory : function, (default: dict)
        Factory function to be used to create the outer-most dict
        in the data structure that holds adjacency lists keyed by node.
        It should require no arguments and return a dict-like object.

    adjlist_dict_factory : function, (default: dict)
        Factory function to be used to create the adjacency list
        dict which holds multiedge key dicts keyed by neighbor.
        It should require no arguments and return a dict-like object.

    edge_key_dict_factory : function, (default: dict)
        Factory function to be used to create the edge key dict
        which holds edge data keyed by edge key.
        It should require no arguments and return a dict-like object.

    edge_attr_dict_factory : function, (default: dict)
        Factory function to be used to create the edge attribute
        dict which holds attribute values keyed by attribute name.
        It should require no arguments and return a dict-like object.

    Examples
    --------
    Create a multigraph object that tracks the order nodes are added.

    >>> from collections import OrderedDict
    >>> class OrderedGraph(nx.MultiDiGraph):
    ...    node_dict_factory = OrderedDict
    >>> G = OrderedGraph()
    >>> G.add_nodes_from( (2,1) )
    >>> G.nodes()
    [2, 1]
    >>> G.add_edges_from( ((2,2), (2,1), (2,1), (1,1)) )
    >>> G.edges()
    [(2, 1), (2, 1), (2, 2), (1, 1)]

    Create a multidigraph object that tracks the order nodes are added
    and for each node track the order that neighbors are added and for
    each neighbor tracks the order that multiedges are added.

    >>> class OrderedGraph(nx.MultiDiGraph):
    ...    node_dict_factory = OrderedDict
    ...    adjlist_dict_factory = OrderedDict
    ...    edge_key_dict_factory = OrderedDict
    >>> G = OrderedGraph()
    >>> G.add_nodes_from( (2,1) )
    >>> G.nodes()
    [2, 1]
    >>> G.add_edges_from( ((2,2), (2,1,2,{'weight':0.1}), (2,1,1,{'weight':0.2}), (1,1)) )
    >>> G.edges(keys=True)
    [(2, 2, 0), (2, 1, 2), (2, 1, 1), (1, 1, 0)]
    """
    # node_dict_factory=dict # already assigned in Graph
    # adjlist_dict_factory=dict
    edge_key_dict_factory = dict
    # edge_attr_dict_factory=dict
    def __init__(self, data=None, **attr):
        """Initialize a MultiDiGraph; same signature as DiGraph.

        Parameters
        ----------
        data : input graph, optional
            Passed straight through to ``DiGraph.__init__``.
        attr : keyword arguments, optional
            Graph attributes as key=value pairs.
        """
        # The read resolves on the class, the write creates an instance
        # attribute — this captures a (possibly subclass-overridden)
        # edge_key_dict_factory on the instance itself.
        self.edge_key_dict_factory = self.edge_key_dict_factory
        DiGraph.__init__(self, data, **attr)
def add_edge(self, u, v, key=None, attr_dict=None, **attr):
"""Add an edge between u and v.
The nodes u and v will be automatically added if they are
not already in the graph.
Edge attributes can be specified with keywords or by providing
a dictionary with key/value pairs. See examples below.
Parameters
----------
u,v : nodes
Nodes can be, for example, strings or numbers.
Nodes must be hashable (and not None) Python objects.
key : hashable identifier, optional (default=lowest unused integer)
Used to distinguish multiedges between a pair of nodes.
attr_dict : dictionary, optional (default= no attributes)
Dictionary of edge attributes. Key/value pairs will
update existing data associated with the edge.
attr : keyword arguments, optional
Edge data (or labels or objects) can be assigned using
keyword arguments.
See Also
--------
add_edges_from : add a collection of edges
Notes
-----
To replace/update edge data, use the optional key argument
to identify a unique edge. Otherwise a new edge will be created.
NetworkX algorithms designed for weighted graphs cannot use
multigraphs directly because it is not clear how to handle
multiedge weights. Convert to Graph using edge attribute
'weight' to enable weighted graph algorithms.
Examples
--------
The following all add the edge e=(1,2) to graph G:
>>> G = nx.MultiDiGraph()
>>> e = (1,2)
>>> G.add_edge(1, 2) # explicit two-node form
>>> G.add_edge(*e) # single edge as tuple of two nodes
>>> G.add_edges_from( [(1,2)] ) # add edges from iterable container
Associate data to edges using keywords:
>>> G.add_edge(1, 2, weight=3)
>>> G.add_edge(1, 2, key=0, weight=4) # update data for key=0
>>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7)
"""
# set up attribute dict
if attr_dict is None:
attr_dict = attr
else:
try:
attr_dict.update(attr)
except AttributeError:
raise NetworkXError(
"The attr_dict argument must be a dictionary.")
# add nodes
if u not in self.succ:
self.succ[u] = self.adjlist_dict_factory()
self.pred[u] = self.adjlist_dict_factory()
self.node[u] = {}
if v not in self.succ:
self.succ[v] = self.adjlist_dict_factory()
self.pred[v] = self.adjlist_dict_factory()
self.node[v] = {}
if v in self.succ[u]:
keydict = self.adj[u][v]
if key is None:
# find a unique integer key
# other methods might be better here?
key = len(keydict)
while key in keydict:
key += 1
datadict = keydict.get(key, self.edge_key_dict_factory())
datadict.update(attr_dict)
keydict[key] = datadict
else:
# selfloops work this way without special treatment
if key is None:
key = 0
datadict = self.edge_attr_dict_factory()
datadict.update(attr_dict)
keydict = self.edge_key_dict_factory()
keydict[key] = datadict
self.succ[u][v] = keydict
self.pred[v][u] = keydict
def remove_edge(self, u, v, key=None):
"""Remove an edge between u and v.
Parameters
----------
u,v: nodes
Remove an edge between nodes u and v.
key : hashable identifier, optional (default=None)
Used to distinguish multiple edges between a pair of nodes.
If None remove a single (abritrary) edge between u and v.
Raises
------
NetworkXError
If there is not an edge between u and v, or
if there is no edge with the specified key.
See Also
--------
remove_edges_from : remove a collection of edges
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_path([0,1,2,3])
>>> G.remove_edge(0,1)
>>> e = (1,2)
>>> G.remove_edge(*e) # unpacks e from an edge tuple
For multiple edges
>>> G = nx.MultiDiGraph()
>>> G.add_edges_from([(1,2),(1,2),(1,2)])
>>> G.remove_edge(1,2) # remove a single (arbitrary) edge
For edges with keys
>>> G = nx.MultiDiGraph()
>>> G.add_edge(1,2,key='first')
>>> G.add_edge(1,2,key='second')
>>> G.remove_edge(1,2,key='second')
"""
try:
d = self.adj[u][v]
except (KeyError):
raise NetworkXError(
"The edge %s-%s is not in the graph." % (u, v))
# remove the edge with specified data
if key is None:
d.popitem()
else:
try:
del d[key]
except (KeyError):
raise NetworkXError(
"The edge %s-%s with key %s is not in the graph." % (u, v, key))
if len(d) == 0:
# remove the key entries if last edge
del self.succ[u][v]
del self.pred[v][u]
def edges_iter(self, nbunch=None, data=False, keys=False, default=None):
"""Return an iterator over the edges.
Edges are returned as tuples with optional data and keys
in the order (node, neighbor, key, data).
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
data : string or bool, optional (default=False)
The edge attribute returned in 3-tuple (u,v,ddict[data]).
If True, return edge attribute dict in 3-tuple (u,v,ddict).
If False, return 2-tuple (u,v).
keys : bool, optional (default=False)
If True, return edge keys with each edge.
default : value, optional (default=None)
Value used for edges that dont have the requested attribute.
Only relevant if data is not True or False.
Returns
-------
edge_iter : iterator
An iterator of (u,v), (u,v,d) or (u,v,key,d) tuples of edges.
See Also
--------
edges : return a list of edges
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For directed graphs this returns the out-edges.
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_path([0,1,2])
>>> G.add_edge(2,3,weight=5)
>>> [e for e in G.edges_iter()]
[(0, 1), (1, 2), (2, 3)]
>>> list(G.edges_iter(data=True)) # default data is {} (empty dict)
[(0, 1, {}), (1, 2, {}), (2, 3, {'weight': 5})]
>>> list(G.edges_iter(data='weight', default=1))
[(0, 1, 1), (1, 2, 1), (2, 3, 5)]
>>> list(G.edges(keys=True)) # default keys are integers
[(0, 1, 0), (1, 2, 0), (2, 3, 0)]
>>> list(G.edges(data=True,keys=True)) # default keys are integers
[(0, 1, 0, {}), (1, 2, 0, {}), (2, 3, 0, {'weight': 5})]
>>> list(G.edges(data='weight',default=1,keys=True))
[(0, 1, 0, 1), (1, 2, 0, 1), (2, 3, 0, 5)]
>>> list(G.edges_iter([0,2]))
[(0, 1), (2, 3)]
>>> list(G.edges_iter(0))
[(0, 1)]
"""
if nbunch is None:
nodes_nbrs = self.adj.items()
else:
nodes_nbrs = ((n, self.adj[n]) for n in self.nbunch_iter(nbunch))
if data is True:
for n, nbrs in nodes_nbrs:
for nbr, keydict in nbrs.items():
for key, ddict in keydict.items():
yield (n, nbr, key, ddict) if keys else (n, nbr, ddict)
elif data is not False:
for n, nbrs in nodes_nbrs:
for nbr, keydict in nbrs.items():
for key, ddict in keydict.items():
d = ddict[data] if data in ddict else default
yield (n, nbr, key, d) if keys else (n, nbr, d)
else:
for n, nbrs in nodes_nbrs:
for nbr, keydict in nbrs.items():
for key in keydict:
yield (n, nbr, key) if keys else (n, nbr)
# alias out_edges to edges
out_edges_iter = edges_iter
    def out_edges(self, nbunch=None, keys=False, data=False):
        """Return a list of the outgoing edges.

        Edges are returned as tuples with optional data and keys
        in the order (node, neighbor, key, data).

        Parameters
        ----------
        nbunch : iterable container, optional (default= all nodes)
            A container of nodes.  The container will be iterated
            through once.
        data : bool, optional (default=False)
            If True, return edge attribute dict with each edge.
        keys : bool, optional (default=False)
            If True, return edge keys with each edge.

        Returns
        -------
        out_edges : list
            A list of (u,v), (u,v,d) or (u,v,key,d) tuples of edges.

        Notes
        -----
        Nodes in nbunch that are not in the graph will be (quietly) ignored.
        For directed graphs edges() is the same as out_edges().

        See Also
        --------
        in_edges: return a list of incoming edges
        """
        # Materialize the lazy iterator into a list.
        return list(self.out_edges_iter(nbunch, keys=keys, data=data))
def in_edges_iter(self, nbunch=None, data=False, keys=False):
"""Return an iterator over the incoming edges.
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
data : bool, optional (default=False)
If True, return edge attribute dict with each edge.
keys : bool, optional (default=False)
If True, return edge keys with each edge.
Returns
-------
in_edge_iter : iterator
An iterator of (u,v), (u,v,d) or (u,v,key,d) tuples of edges.
See Also
--------
edges_iter : return an iterator of edges
"""
if nbunch is None:
nodes_nbrs = self.pred.items()
else:
nodes_nbrs = ((n, self.pred[n]) for n in self.nbunch_iter(nbunch))
if data:
for n, nbrs in nodes_nbrs:
for nbr, keydict in nbrs.items():
for key, data in keydict.items():
if keys:
yield (nbr, n, key, data)
else:
yield (nbr, n, data)
else:
for n, nbrs in nodes_nbrs:
for nbr, keydict in nbrs.items():
for key, data in keydict.items():
if keys:
yield (nbr, n, key)
else:
yield (nbr, n)
    def in_edges(self, nbunch=None, keys=False, data=False):
        """Return a list of the incoming edges.

        Parameters
        ----------
        nbunch : iterable container, optional (default= all nodes)
            A container of nodes.  The container will be iterated
            through once.
        data : bool, optional (default=False)
            If True, return edge attribute dict with each edge.
        keys : bool, optional (default=False)
            If True, return edge keys with each edge.

        Returns
        -------
        in_edges : list
            A list of (u,v), (u,v,d) or (u,v,key,d) tuples of edges.

        See Also
        --------
        out_edges: return a list of outgoing edges
        """
        # Materialize the lazy iterator into a list.
        return list(self.in_edges_iter(nbunch, keys=keys, data=data))
def degree_iter(self, nbunch=None, weight=None):
"""Return an iterator for (node, degree).
The node degree is the number of edges adjacent to the node.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights.
Returns
-------
nd_iter : an iterator
The iterator returns two-tuples of (node, degree).
See Also
--------
degree
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_path([0,1,2,3])
>>> list(G.degree_iter(0)) # node 0 with degree 1
[(0, 1)]
>>> list(G.degree_iter([0,1]))
[(0, 1), (1, 2)]
"""
if nbunch is None:
nodes_nbrs = ( (n, succ, self.pred[n])
for n,succ in self.succ.items() )
else:
nodes_nbrs = ( (n, self.succ[n], self.pred[n])
for n in self.nbunch_iter(nbunch))
if weight is None:
for n, succ, pred in nodes_nbrs:
indeg = sum([len(data) for data in pred.values()])
outdeg = sum([len(data) for data in succ.values()])
yield (n, indeg + outdeg)
else:
# edge weighted graph - degree is sum of nbr edge weights
for n, succ, pred in nodes_nbrs:
deg = sum([d.get(weight, 1)
for data in pred.values()
for d in data.values()])
deg += sum([d.get(weight, 1)
for data in succ.values()
for d in data.values()])
yield (n, deg)
def in_degree_iter(self, nbunch=None, weight=None):
        """Return an iterator for (node, in-degree).

        The node in-degree is the number of edges pointing in to the node.

        Parameters
        ----------
        nbunch : iterable container, optional (default=all nodes)
            A container of nodes.  The container will be iterated
            through once.
        weight : string or None, optional (default=None)
            The edge attribute that holds the numerical value used
            as a weight.  If None, then each edge has weight 1.
            The degree is the sum of the edge weights adjacent to the node.

        Returns
        -------
        nd_iter : an iterator
            The iterator returns two-tuples of (node, in-degree).

        See Also
        --------
        degree, in_degree, out_degree, out_degree_iter

        Examples
        --------
        >>> G = nx.MultiDiGraph()
        >>> G.add_path([0,1,2,3])
        >>> list(G.in_degree_iter(0)) # node 0 with degree 0
        [(0, 0)]
        >>> list(G.in_degree_iter([0,1]))
        [(0, 0), (1, 1)]
        """
        if nbunch is None:
            node_pred_pairs = self.pred.items()
        else:
            node_pred_pairs = ((node, self.pred[node])
                               for node in self.nbunch_iter(nbunch))
        if weight is None:
            # Unweighted: in-degree is the number of keyed parallel edges.
            for node, preds in node_pred_pairs:
                yield (node, sum(len(keydict) for keydict in preds.values()))
        else:
            # Weighted: total the edge attribute (default 1) over every
            # parallel incoming edge.
            for node, preds in node_pred_pairs:
                total = 0
                for keydict in preds.values():
                    for edge_data in keydict.values():
                        total += edge_data.get(weight, 1)
                yield (node, total)
def out_degree_iter(self, nbunch=None, weight=None):
        """Return an iterator for (node, out-degree).

        The node out-degree is the number of edges pointing out of the node.

        Parameters
        ----------
        nbunch : iterable container, optional (default=all nodes)
            A container of nodes.  The container will be iterated
            through once.
        weight : string or None, optional (default=None)
            The edge attribute that holds the numerical value used
            as a weight.  If None, then each edge has weight 1.
            The degree is the sum of the edge weights.

        Returns
        -------
        nd_iter : an iterator
            The iterator returns two-tuples of (node, out-degree).

        See Also
        --------
        degree, in_degree, out_degree, in_degree_iter

        Examples
        --------
        >>> G = nx.MultiDiGraph()
        >>> G.add_path([0,1,2,3])
        >>> list(G.out_degree_iter(0)) # node 0 with degree 1
        [(0, 1)]
        >>> list(G.out_degree_iter([0,1]))
        [(0, 1), (1, 1)]
        """
        if nbunch is None:
            node_succ_pairs = self.succ.items()
        else:
            node_succ_pairs = ((node, self.succ[node])
                               for node in self.nbunch_iter(nbunch))
        if weight is None:
            # Unweighted: out-degree is the number of keyed parallel edges.
            for node, succs in node_succ_pairs:
                yield (node, sum(len(keydict) for keydict in succs.values()))
        else:
            # Weighted: total the edge attribute (default 1) over every
            # parallel outgoing edge.
            for node, succs in node_succ_pairs:
                total = 0
                for keydict in succs.values():
                    for edge_data in keydict.values():
                        total += edge_data.get(weight, 1)
                yield (node, total)
def is_multigraph(self):
        """Return True if graph is a multigraph, False otherwise."""
        # This class always allows parallel (keyed) edges.
        return True
def is_directed(self):
        """Return True if graph is directed, False otherwise."""
        # This class maintains separate successor/predecessor maps,
        # i.e. it is always directed.
        return True
def to_directed(self):
        """Return a directed copy of the graph.

        Returns
        -------
        G : MultiDiGraph
            A deepcopy of the graph.

        Notes
        -----
        This returns a "deepcopy" of the edge, node, and
        graph attributes which attempts to completely copy
        all of the data and references.

        This is in contrast to the similar G=DiGraph(D) which returns a
        shallow copy of the data.

        See the Python copy module for more information on shallow
        and deep copies, http://docs.python.org/library/copy.html.

        Examples
        --------
        >>> G = nx.Graph()   # or MultiGraph, etc
        >>> G.add_path([0,1])
        >>> H = G.to_directed()
        >>> H.edges()
        [(0, 1), (1, 0)]

        If already directed, return a (deep) copy

        >>> G = nx.MultiDiGraph()
        >>> G.add_path([0,1])
        >>> H = G.to_directed()
        >>> H.edges()
        [(0, 1)]
        """
        # The graph is already directed, so a deep copy is sufficient.
        return deepcopy(self)
def to_undirected(self, reciprocal=False):
        """Return an undirected representation of the digraph.

        Parameters
        ----------
        reciprocal : bool (optional)
            If True only keep edges that appear in both directions
            in the original digraph.

        Returns
        -------
        G : MultiGraph
            An undirected graph with the same name and nodes and
            with edge (u,v,data) if either (u,v,data) or (v,u,data)
            is in the digraph.  If both edges exist in digraph and
            their edge data is different, only one edge is created
            with an arbitrary choice of which edge data to use.
            You must check and correct for this manually if desired.

        Notes
        -----
        This returns a "deepcopy" of the edge, node, and
        graph attributes which attempts to completely copy
        all of the data and references.

        This is in contrast to the similar D=DiGraph(G) which returns a
        shallow copy of the data.

        See the Python copy module for more information on shallow
        and deep copies, http://docs.python.org/library/copy.html.

        Warning: If you have subclassed MultiGraph to use dict-like objects
        in the data structure, those changes do not transfer to the
        MultiGraph returned by this method.
        """
        graph = MultiGraph()
        graph.name = self.name
        graph.add_nodes_from(self)
        # NB: the identity test on `reciprocal` is deliberate; any value
        # other than the literal True takes the keep-everything branch.
        if reciprocal is True:
            edge_gen = ((u, v, key, deepcopy(data))
                        for u, nbrs in self.adjacency_iter()
                        for v, keydict in nbrs.items()
                        for key, data in keydict.items()
                        if self.has_edge(v, u, key))
        else:
            edge_gen = ((u, v, key, deepcopy(data))
                        for u, nbrs in self.adjacency_iter()
                        for v, keydict in nbrs.items()
                        for key, data in keydict.items())
        graph.add_edges_from(edge_gen)
        graph.graph = deepcopy(self.graph)
        graph.node = deepcopy(self.node)
        return graph
def subgraph(self, nbunch):
        """Return the subgraph induced on nodes in nbunch.

        The induced subgraph of the graph contains the nodes in nbunch
        and the edges between those nodes.

        Parameters
        ----------
        nbunch : list, iterable
            A container of nodes which will be iterated through once.

        Returns
        -------
        G : Graph
            A subgraph of the graph with the same edge attributes.

        Notes
        -----
        The graph, edge or node attributes just point to the original graph.
        So changes to the node or edge structure will not be reflected in
        the original graph while changes to the attributes will.

        To create a subgraph with its own copy of the edge/node attributes use:
        nx.Graph(G.subgraph(nbunch))

        If edge attributes are containers, a deep copy can be obtained using:
        G.subgraph(nbunch).copy()

        For an inplace reduction of a graph to a subgraph you can remove nodes:
        G.remove_nodes_from([n for n in G if n not in set(nbunch)])

        Examples
        --------
        >>> G = nx.Graph()   # or DiGraph, MultiGraph, MultiDiGraph, etc
        >>> G.add_path([0,1,2,3])
        >>> H = G.subgraph([0,1,2])
        >>> H.edges()
        [(0, 1), (1, 2)]
        """
        bunch = self.nbunch_iter(nbunch)
        # create new graph and copy subgraph into it
        H = self.__class__()
        # copy node and attribute dictionaries (shared, not copied)
        for n in bunch:
            H.node[n] = self.node[n]
        # namespace shortcuts for speed
        H_succ = H.succ
        H_pred = H.pred
        self_succ = self.succ
        # NOTE(review): self_pred is bound but never used below --
        # candidate for removal.
        self_pred = self.pred
        # add nodes
        for n in H:
            H_succ[n] = H.adjlist_dict_factory()
            H_pred[n] = H.adjlist_dict_factory()
        # add edges
        for u in H_succ:
            Hnbrs = H_succ[u]
            for v, edgedict in self_succ[u].items():
                if v in H_succ:
                    # register the edge under both endpoints:
                    # H.succ[u][v] and H.pred[v][u] share the same
                    # (shallow-copied) keyed-edge dict
                    ed = edgedict.copy()
                    Hnbrs[v] = ed
                    H_pred[v][u] = ed
        H.graph = self.graph
        return H
def reverse(self, copy=True):
        """Return the reverse of the graph.

        The reverse is a graph with the same nodes and edges
        but with the directions of the edges reversed.

        Parameters
        ----------
        copy : bool optional (default=True)
            If True, return a new DiGraph holding the reversed edges.
            If False, the reverse is applied in place on the original
            graph (this changes the original graph).
        """
        if copy:
            H = self.__class__(name="Reverse of (%s)"%self.name)
            H.add_nodes_from(self)
            # Flip every (u, v) edge to (v, u), deep-copying edge data.
            H.add_edges_from((v, u, k, deepcopy(d)) for u, v, k, d
                             in self.edges(keys=True, data=True))
            H.graph = deepcopy(self.graph)
            H.node = deepcopy(self.node)
        else:
            # In place: swap the predecessor/successor maps and rebind
            # adj, which must always alias the successor dict.
            self.pred, self.succ = self.succ, self.pred
            self.adj = self.succ
            H = self
        return H
| gpl-2.0 |
Dhivyap/ansible | lib/ansible/modules/cloud/vmware/vmware_cluster_ha.py | 10 | 21392 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
# Copyright: (c) 2018, Ansible Project
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Metadata consumed by Ansible tooling (e.g. ansible-doc); not used at runtime.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_cluster_ha
short_description: Manage High Availability (HA) on VMware vSphere clusters
description:
- Manages HA configuration on VMware vSphere clusters.
- All values and VMware object names are case sensitive.
version_added: '2.9'
author:
- Joseph Callen (@jcpowermac)
- Abhijeet Kasurde (@Akasurde)
requirements:
- Tested on ESXi 5.5 and 6.5.
- PyVmomi installed.
options:
cluster_name:
description:
- The name of the cluster to be managed.
type: str
required: yes
datacenter:
description:
- The name of the datacenter.
type: str
required: yes
aliases: [ datacenter_name ]
enable_ha:
description:
- Whether to enable HA.
type: bool
default: 'no'
ha_host_monitoring:
description:
- Whether HA restarts virtual machines after a host fails.
- If set to C(enabled), HA restarts virtual machines after a host fails.
- If set to C(disabled), HA does not restart virtual machines after a host fails.
- If C(enable_ha) is set to C(no), then this value is ignored.
type: str
choices: [ 'enabled', 'disabled' ]
default: 'enabled'
ha_vm_monitoring:
description:
- State of virtual machine health monitoring service.
- If set to C(vmAndAppMonitoring), HA response to both virtual machine and application heartbeat failure.
- If set to C(vmMonitoringDisabled), virtual machine health monitoring is disabled.
- If set to C(vmMonitoringOnly), HA response to virtual machine heartbeat failure.
- If C(enable_ha) is set to C(no), then this value is ignored.
type: str
choices: ['vmAndAppMonitoring', 'vmMonitoringOnly', 'vmMonitoringDisabled']
default: 'vmMonitoringDisabled'
host_isolation_response:
description:
- Indicates whether or VMs should be powered off if a host determines that it is isolated from the rest of the compute resource.
- If set to C(none), do not power off VMs in the event of a host network isolation.
- If set to C(powerOff), power off VMs in the event of a host network isolation.
- If set to C(shutdown), shut down VMs guest operating system in the event of a host network isolation.
type: str
choices: ['none', 'powerOff', 'shutdown']
default: 'none'
slot_based_admission_control:
description:
- Configure slot based admission control policy.
- C(slot_based_admission_control), C(reservation_based_admission_control) and C(failover_host_admission_control) are mutually exclusive.
suboptions:
failover_level:
description:
- Number of host failures that should be tolerated.
type: int
required: true
type: dict
reservation_based_admission_control:
description:
- Configure reservation based admission control policy.
- C(slot_based_admission_control), C(reservation_based_admission_control) and C(failover_host_admission_control) are mutually exclusive.
suboptions:
failover_level:
description:
- Number of host failures that should be tolerated.
type: int
required: true
auto_compute_percentages:
description:
- By default, C(failover_level) is used to calculate C(cpu_failover_resources_percent) and C(memory_failover_resources_percent).
If a user wants to override the percentage values, he has to set this field to false.
type: bool
default: true
cpu_failover_resources_percent:
description:
- Percentage of CPU resources in the cluster to reserve for failover.
Ignored if C(auto_compute_percentages) is not set to false.
type: int
default: 50
memory_failover_resources_percent:
description:
- Percentage of memory resources in the cluster to reserve for failover.
Ignored if C(auto_compute_percentages) is not set to false.
type: int
default: 50
type: dict
failover_host_admission_control:
description:
- Configure dedicated failover hosts.
- C(slot_based_admission_control), C(reservation_based_admission_control) and C(failover_host_admission_control) are mutually exclusive.
suboptions:
failover_hosts:
description:
- List of dedicated failover hosts.
type: list
required: true
type: dict
ha_vm_failure_interval:
description:
- The number of seconds after which virtual machine is declared as failed
if no heartbeat has been received.
- This setting is only valid if C(ha_vm_monitoring) is set to, either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
- Unit is seconds.
type: int
default: 30
ha_vm_min_up_time:
description:
- The number of seconds for the virtual machine's heartbeats to stabilize after
the virtual machine has been powered on.
- Valid only when I(ha_vm_monitoring) is set to either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
- Unit is seconds.
type: int
default: 120
ha_vm_max_failures:
description:
- Maximum number of failures and automated resets allowed during the time
that C(ha_vm_max_failure_window) specifies.
- Valid only when I(ha_vm_monitoring) is set to either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
type: int
default: 3
ha_vm_max_failure_window:
description:
- The number of seconds for the window during which up to C(ha_vm_max_failures) resets
can occur before automated responses stop.
- Valid only when I(ha_vm_monitoring) is set to either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
- Unit is seconds.
- Default specifies no failure window.
type: int
default: -1
ha_restart_priority:
description:
- Priority HA gives to a virtual machine if sufficient capacity is not available
to power on all failed virtual machines.
- Valid only if I(ha_vm_monitoring) is set to either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
- If set to C(disabled), then HA is disabled for this virtual machine.
- If set to C(high), then virtual machine with this priority have a higher chance of powering on after a failure,
when there is insufficient capacity on hosts to meet all virtual machine needs.
- If set to C(medium), then virtual machine with this priority have an intermediate chance of powering on after a failure,
when there is insufficient capacity on hosts to meet all virtual machine needs.
- If set to C(low), then virtual machine with this priority have a lower chance of powering on after a failure,
when there is insufficient capacity on hosts to meet all virtual machine needs.
type: str
default: 'medium'
choices: [ 'disabled', 'high', 'low', 'medium' ]
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r"""
- name: Enable HA without admission control
vmware_cluster_ha:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datacenter_name: datacenter
cluster_name: cluster
enable_ha: yes
delegate_to: localhost
- name: Enable HA and VM monitoring without admission control
vmware_cluster_ha:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
datacenter_name: DC0
cluster_name: "{{ cluster_name }}"
enable_ha: True
ha_vm_monitoring: vmMonitoringOnly
delegate_to: localhost
- name: Enable HA with admission control reserving 50% of resources for HA
vmware_cluster_ha:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datacenter_name: datacenter
cluster_name: cluster
enable_ha: yes
reservation_based_admission_control:
auto_compute_percentages: False
failover_level: 1
cpu_failover_resources_percent: 50
memory_failover_resources_percent: 50
delegate_to: localhost
"""
RETURN = r"""#
"""
try:
from pyVmomi import vim, vmodl
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import (PyVmomi, TaskError, find_datacenter_by_name,
vmware_argument_spec, wait_for_task)
from ansible.module_utils._text import to_native
class VMwareCluster(PyVmomi):
    """Locate the target cluster and reconcile its HA configuration.

    Fails the Ansible module early if the datacenter or cluster named in
    the parameters cannot be found.
    """

    def __init__(self, module):
        super(VMwareCluster, self).__init__(module)
        self.cluster_name = module.params['cluster_name']
        self.datacenter_name = module.params['datacenter']
        self.enable_ha = module.params['enable_ha']
        self.datacenter = None
        self.cluster = None
        # Translate the string choice into the corresponding vim enum value.
        self.host_isolation_response = getattr(vim.cluster.DasVmSettings.IsolationResponse, self.params.get('host_isolation_response'))
        # Admission control is implied by supplying any one of the three
        # (mutually exclusive) policy sub-option dicts while HA is enabled.
        if self.enable_ha and (
                self.params.get('slot_based_admission_control') or
                self.params.get('reservation_based_admission_control') or
                self.params.get('failover_host_admission_control')):
            self.ha_admission_control = True
        else:
            self.ha_admission_control = False

        self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name)
        if self.datacenter is None:
            self.module.fail_json(msg="Datacenter %s does not exist." % self.datacenter_name)

        self.cluster = self.find_cluster_by_name(cluster_name=self.cluster_name)
        if self.cluster is None:
            self.module.fail_json(msg="Cluster %s does not exist." % self.cluster_name)

    def get_failover_hosts(self):
        """
        Get failover hosts for failover_host_admission_control policy

        Returns: List of ESXi host objects, sorted by name

        Fails the module if any requested host is not a cluster member.
        """
        policy = self.params.get('failover_host_admission_control')
        hosts = []
        all_hosts = dict((h.name, h) for h in self.get_all_hosts_by_cluster(self.cluster_name))
        for host in policy.get('failover_hosts'):
            if host in all_hosts:
                hosts.append(all_hosts.get(host))
            else:
                self.module.fail_json(msg="Host %s is not a member of cluster %s." % (host, self.cluster_name))
        # Sorted so comparisons against the current policy are order-stable.
        hosts.sort(key=lambda h: h.name)
        return hosts

    def check_ha_config_diff(self):
        """
        Check HA configuration diff

        Returns: True if the live cluster config differs from the
        requested parameters, else False
        """
        das_config = self.cluster.configurationEx.dasConfig
        if das_config.enabled != self.enable_ha:
            return True

        # Only compare the detailed settings when HA is (to remain) enabled.
        if self.enable_ha and (
                das_config.vmMonitoring != self.params.get('ha_vm_monitoring') or
                das_config.hostMonitoring != self.params.get('ha_host_monitoring') or
                das_config.admissionControlEnabled != self.ha_admission_control or
                das_config.defaultVmSettings.restartPriority != self.params.get('ha_restart_priority') or
                das_config.defaultVmSettings.isolationResponse != self.host_isolation_response or
                das_config.defaultVmSettings.vmToolsMonitoringSettings.vmMonitoring != self.params.get('ha_vm_monitoring') or
                das_config.defaultVmSettings.vmToolsMonitoringSettings.failureInterval != self.params.get('ha_vm_failure_interval') or
                das_config.defaultVmSettings.vmToolsMonitoringSettings.minUpTime != self.params.get('ha_vm_min_up_time') or
                das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailures != self.params.get('ha_vm_max_failures') or
                das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailureWindow != self.params.get('ha_vm_max_failure_window')):
            return True

        if self.ha_admission_control:
            # A policy object of the wrong class always counts as a diff.
            if self.params.get('slot_based_admission_control'):
                policy = self.params.get('slot_based_admission_control')
                if not isinstance(das_config.admissionControlPolicy, vim.cluster.FailoverLevelAdmissionControlPolicy) or \
                        das_config.admissionControlPolicy.failoverLevel != policy.get('failover_level'):
                    return True
            elif self.params.get('reservation_based_admission_control'):
                policy = self.params.get('reservation_based_admission_control')
                auto_compute_percentages = policy.get('auto_compute_percentages')
                if not isinstance(das_config.admissionControlPolicy, vim.cluster.FailoverResourcesAdmissionControlPolicy) or \
                        das_config.admissionControlPolicy.autoComputePercentages != auto_compute_percentages or \
                        das_config.admissionControlPolicy.failoverLevel != policy.get('failover_level'):
                    return True
                # Explicit percentages only matter when auto-compute is off.
                if not auto_compute_percentages:
                    if das_config.admissionControlPolicy.cpuFailoverResourcesPercent != policy.get('cpu_failover_resources_percent') or \
                            das_config.admissionControlPolicy.memoryFailoverResourcesPercent != policy.get('memory_failover_resources_percent'):
                        return True
            elif self.params.get('failover_host_admission_control'):
                policy = self.params.get('failover_host_admission_control')
                if not isinstance(das_config.admissionControlPolicy, vim.cluster.FailoverHostAdmissionControlPolicy):
                    return True
                # Sort in place so both sides compare in the same order.
                das_config.admissionControlPolicy.failoverHosts.sort(key=lambda h: h.name)
                if das_config.admissionControlPolicy.failoverHosts != self.get_failover_hosts():
                    return True

        return False

    def configure_ha(self):
        """
        Manage HA Configuration

        Builds a ConfigSpecEx reflecting the module parameters and applies
        it via ReconfigureComputeResource_Task; honours check mode.
        Always exits the module via exit_json/fail_json.
        """
        changed, result = False, None
        if self.check_ha_config_diff():
            if not self.module.check_mode:
                cluster_config_spec = vim.cluster.ConfigSpecEx()
                cluster_config_spec.dasConfig = vim.cluster.DasConfigInfo()
                cluster_config_spec.dasConfig.enabled = self.enable_ha
                if self.enable_ha:
                    # VM tools heartbeat monitoring settings.
                    vm_tool_spec = vim.cluster.VmToolsMonitoringSettings()
                    vm_tool_spec.enabled = True
                    vm_tool_spec.vmMonitoring = self.params.get('ha_vm_monitoring')
                    vm_tool_spec.failureInterval = self.params.get('ha_vm_failure_interval')
                    vm_tool_spec.minUpTime = self.params.get('ha_vm_min_up_time')
                    vm_tool_spec.maxFailures = self.params.get('ha_vm_max_failures')
                    vm_tool_spec.maxFailureWindow = self.params.get('ha_vm_max_failure_window')

                    das_vm_config = vim.cluster.DasVmSettings()
                    das_vm_config.restartPriority = self.params.get('ha_restart_priority')
                    das_vm_config.isolationResponse = self.host_isolation_response
                    das_vm_config.vmToolsMonitoringSettings = vm_tool_spec
                    cluster_config_spec.dasConfig.defaultVmSettings = das_vm_config

                cluster_config_spec.dasConfig.admissionControlEnabled = self.ha_admission_control

                if self.ha_admission_control:
                    # Exactly one policy dict is present (enforced by
                    # mutually_exclusive in main()).
                    if self.params.get('slot_based_admission_control'):
                        cluster_config_spec.dasConfig.admissionControlPolicy = vim.cluster.FailoverLevelAdmissionControlPolicy()
                        policy = self.params.get('slot_based_admission_control')
                        cluster_config_spec.dasConfig.admissionControlPolicy.failoverLevel = policy.get('failover_level')
                    elif self.params.get('reservation_based_admission_control'):
                        cluster_config_spec.dasConfig.admissionControlPolicy = vim.cluster.FailoverResourcesAdmissionControlPolicy()
                        policy = self.params.get('reservation_based_admission_control')
                        auto_compute_percentages = policy.get('auto_compute_percentages')
                        cluster_config_spec.dasConfig.admissionControlPolicy.autoComputePercentages = auto_compute_percentages
                        cluster_config_spec.dasConfig.admissionControlPolicy.failoverLevel = policy.get('failover_level')
                        if not auto_compute_percentages:
                            cluster_config_spec.dasConfig.admissionControlPolicy.cpuFailoverResourcesPercent = \
                                policy.get('cpu_failover_resources_percent')
                            cluster_config_spec.dasConfig.admissionControlPolicy.memoryFailoverResourcesPercent = \
                                policy.get('memory_failover_resources_percent')
                    elif self.params.get('failover_host_admission_control'):
                        cluster_config_spec.dasConfig.admissionControlPolicy = vim.cluster.FailoverHostAdmissionControlPolicy()
                        policy = self.params.get('failover_host_admission_control')
                        cluster_config_spec.dasConfig.admissionControlPolicy.failoverHosts = self.get_failover_hosts()

                cluster_config_spec.dasConfig.hostMonitoring = self.params.get('ha_host_monitoring')
                cluster_config_spec.dasConfig.vmMonitoring = self.params.get('ha_vm_monitoring')

                try:
                    # modify=True: merge with the existing cluster config.
                    task = self.cluster.ReconfigureComputeResource_Task(cluster_config_spec, True)
                    changed, result = wait_for_task(task)
                except vmodl.RuntimeFault as runtime_fault:
                    self.module.fail_json(msg=to_native(runtime_fault.msg))
                except vmodl.MethodFault as method_fault:
                    self.module.fail_json(msg=to_native(method_fault.msg))
                except TaskError as task_e:
                    self.module.fail_json(msg=to_native(task_e))
                except Exception as generic_exc:
                    self.module.fail_json(msg="Failed to update cluster"
                                              " due to generic exception %s" % to_native(generic_exc))
            else:
                # Check mode: a diff was detected, so report a change
                # without touching the cluster.
                changed = True

        self.module.exit_json(changed=changed, result=result)
def main():
    """Module entry point: declare the interface and apply the HA settings."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        cluster_name=dict(type='str', required=True),
        datacenter=dict(type='str', required=True, aliases=['datacenter_name']),
        # HA on/off and host-level behaviour
        enable_ha=dict(type='bool', default=False),
        ha_host_monitoring=dict(
            type='str',
            default='enabled',
            choices=['enabled', 'disabled']),
        host_isolation_response=dict(
            type='str',
            default='none',
            choices=['none', 'powerOff', 'shutdown']),
        # VM health monitoring
        ha_vm_monitoring=dict(
            type='str',
            choices=['vmAndAppMonitoring', 'vmMonitoringOnly', 'vmMonitoringDisabled'],
            default='vmMonitoringDisabled'),
        ha_vm_failure_interval=dict(type='int', default=30),
        ha_vm_min_up_time=dict(type='int', default=120),
        ha_vm_max_failures=dict(type='int', default=3),
        ha_vm_max_failure_window=dict(type='int', default=-1),
        ha_restart_priority=dict(
            type='str',
            choices=['high', 'low', 'medium', 'disabled'],
            default='medium'),
        # Admission control: exactly one policy may be supplied
        slot_based_admission_control=dict(type='dict', options=dict(
            failover_level=dict(type='int', required=True),
        )),
        reservation_based_admission_control=dict(type='dict', options=dict(
            auto_compute_percentages=dict(type='bool', default=True),
            failover_level=dict(type='int', required=True),
            cpu_failover_resources_percent=dict(type='int', default=50),
            memory_failover_resources_percent=dict(type='int', default=50),
        )),
        failover_host_admission_control=dict(type='dict', options=dict(
            failover_hosts=dict(type='list', elements='str', required=True),
        )),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[
            ['slot_based_admission_control',
             'reservation_based_admission_control',
             'failover_host_admission_control'],
        ],
    )

    manager = VMwareCluster(module)
    manager.configure_ha()
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
FRExLS/geonode | geonode/people/models.py | 20 | 6315 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.db import models
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from django.contrib.auth.models import AbstractUser
from django.db.models import signals
from django.conf import settings
from taggit.managers import TaggableManager
from geonode.base.enumerations import COUNTRIES
from geonode.groups.models import GroupProfile
from account.models import EmailAddress
from .utils import format_address
if 'notification' in settings.INSTALLED_APPS:
from notification import models as notification
class Profile(AbstractUser):
    """Fully featured Geonode user.

    Extends Django's AbstractUser with ISO 19115-style responsible-party
    contact fields, free-form keywords, and GeoNode group helpers.
    """

    # --- Responsible-party / contact metadata -------------------------------
    organization = models.CharField(
        _('Organization Name'),
        max_length=255,
        blank=True,
        null=True,
        help_text=_('name of the responsible organization'))
    profile = models.TextField(_('Profile'), null=True, blank=True, help_text=_('introduce yourself'))
    position = models.CharField(
        _('Position Name'),
        max_length=255,
        blank=True,
        null=True,
        help_text=_('role or position of the responsible person'))
    voice = models.CharField(_('Voice'), max_length=255, blank=True, null=True, help_text=_(
        'telephone number by which individuals can speak to the responsible organization or individual'))
    fax = models.CharField(_('Facsimile'), max_length=255, blank=True, null=True, help_text=_(
        'telephone number of a facsimile machine for the responsible organization or individual'))
    # --- Postal address -----------------------------------------------------
    delivery = models.CharField(
        _('Delivery Point'),
        max_length=255,
        blank=True,
        null=True,
        help_text=_('physical and email address at which the organization or individual may be contacted'))
    city = models.CharField(
        _('City'),
        max_length=255,
        blank=True,
        null=True,
        help_text=_('city of the location'))
    area = models.CharField(
        _('Administrative Area'),
        max_length=255,
        blank=True,
        null=True,
        help_text=_('state, province of the location'))
    zipcode = models.CharField(
        _('Postal Code'),
        max_length=255,
        blank=True,
        null=True,
        help_text=_('ZIP or other postal code'))
    country = models.CharField(
        choices=COUNTRIES,
        max_length=3,
        blank=True,
        null=True,
        help_text=_('country of the physical address'))
    # NOTE(review): the help_text below has an unbalanced '(' in its prose;
    # it is a user-visible string, so fixing it is a content decision.
    keywords = TaggableManager(_('keywords'), blank=True, help_text=_(
        'commonly used word(s) or formalised word(s) or phrase(s) used to describe the subject \
        (space or comma-separated'))

    def get_absolute_url(self):
        # Canonical profile page, keyed by username.
        return reverse('profile_detail', args=[self.username, ])

    def __unicode__(self):
        return u"%s" % (self.username)

    def class_name(value):
        # NOTE(review): unconventional signature -- the single positional
        # parameter acts as the object whose class name is returned; when
        # called as profile.class_name() it receives the instance itself.
        return value.__class__.__name__

    # Keep username as the authentication identifier.
    USERNAME_FIELD = 'username'

    def group_list_public(self):
        """Return this user's GroupProfiles that are not private."""
        return GroupProfile.objects.exclude(access="private").filter(groupmember__user=self)

    def group_list_all(self):
        """Return every GroupProfile this user is a member of."""
        return GroupProfile.objects.filter(groupmember__user=self)

    def keyword_list(self):
        """
        Returns a list of the Profile's keywords.
        """
        return [kw.name for kw in self.keywords.all()]

    @property
    def name_long(self):
        # Human-readable display name, always including the username.
        if self.first_name and self.last_name:
            return '%s %s (%s)' % (self.first_name, self.last_name, self.username)
        elif (not self.first_name) and self.last_name:
            return '%s (%s)' % (self.last_name, self.username)
        elif self.first_name and (not self.last_name):
            return '%s (%s)' % (self.first_name, self.username)
        else:
            return self.username

    @property
    def location(self):
        # Formatted one-line postal address built from the address fields.
        return format_address(self.delivery, self.zipcode, self.city, self.area, self.country)
def get_anonymous_user_instance(Profile):
    """Build the unsaved anonymous user.

    The parameter intentionally shadows the module-level ``Profile``: the
    caller passes in the configured user model class.  Presumably this is
    the django-guardian ANONYMOUS_USER factory hook -- confirm in settings.
    """
    return Profile(username='AnonymousUser')
def profile_post_save(instance, sender, **kwargs):
    """
    Make sure the user belongs by default to the anonymous group.
    This will make sure that anonymous permissions will be granted to the new users.

    Also keeps the primary account EmailAddress row in sync with
    Profile.email after every save.
    """
    # Imported lazily to avoid import-time coupling with django.contrib.auth.
    from django.contrib.auth.models import Group
    anon_group, created = Group.objects.get_or_create(name='anonymous')
    instance.groups.add(anon_group)

    # keep in sync Profile email address with Account email address;
    # 'raw' is True during fixture loading (loaddata), skip syncing then
    if instance.email not in [u'', '', None] and not kwargs.get('raw', False):
        address, created = EmailAddress.objects.get_or_create(
            user=instance, primary=True,
            defaults={'email': instance.email, 'verified': False})
        if not created:
            # A primary address already existed: update it in place.
            EmailAddress.objects.filter(user=instance, primary=True).update(email=instance.email)
def email_post_save(instance, sender, **kw):
    """Mirror a saved primary EmailAddress back onto Profile.email."""
    if not instance.primary:
        return
    Profile.objects.filter(id=instance.user.pk).update(email=instance.email)
def profile_pre_save(instance, sender, **kw):
    """Notify the user when an existing profile flips to active.

    New profiles (no matching row yet) are ignored; the notification is
    only sent when the 'notification' app is installed.
    """
    matching_profiles = Profile.objects.filter(id=instance.id)
    if matching_profiles.count() == 0:
        # First save of a brand-new profile: nothing to compare against.
        return
    if instance.is_active and not matching_profiles.get().is_active and \
            'notification' in settings.INSTALLED_APPS:
        notification.send([instance, ], "account_active")
# Wire up the Profile/EmailAddress lifecycle hooks defined in this module.
signals.pre_save.connect(profile_pre_save, sender=Profile)
signals.post_save.connect(profile_post_save, sender=Profile)
signals.post_save.connect(email_post_save, sender=EmailAddress)
| gpl-3.0 |
Donkyhotay/MoonPy | zope/server/linereceiver/linetask.py | 1 | 2064 | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Line Task
$Id: linetask.py 27442 2004-09-03 08:16:55Z shane $
"""
import socket
import time
from zope.server.interfaces import ITask
from zope.interface import implements
class LineTask(object):
    """Generic worker-thread task for command line protocols.

    Dispatches one named channel method with the parsed command
    arguments, recording timing and hit-log data around the call.
    """
    implements(ITask)

    def __init__(self, channel, command, m_name):
        self.channel = channel
        self.m_name = m_name
        self.args = command.args
        self.close_on_finish = 0  # truthy once the channel must be closed

    def service(self):
        """Called to execute the task."""
        channel = self.channel
        try:
            try:
                self.start()
                handler = getattr(channel, self.m_name)
                handler(self.args)
                self.finish()
            except socket.error:
                self.close_on_finish = 1
                if channel.adj.log_socket_errors:
                    raise
            except:
                # Deliberately broad: any other failure is reported
                # through the channel's own exception handling.
                channel.exception()
        finally:
            if self.close_on_finish:
                channel.close_when_done()

    def cancel(self):
        'See ITask'
        self.channel.close_when_done()

    def defer(self):
        'See ITask'
        pass

    def start(self):
        # Record when servicing began; consumed by hit loggers.
        self.start_time = time.time()

    def finish(self):
        log = self.channel.server.hit_log
        if log is not None:
            log.log(self)
| gpl-3.0 |
jwheare/digest | lib/reportlab/lib/fonts.py | 1 | 3166 | #!/bin/env python
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/lib/fonts.py
__version__=''' $Id: fonts.py 3101 2007-06-04 14:05:14Z rgbecker $ '''
import sys, os
###############################################################################
# A place to put useful font stuff
###############################################################################
#
# Font Mappings
# The brute force approach to finding the correct postscript font name;
# much safer than the rule-based ones we tried.
# preprocessor to reduce font face names to the shortest list
# possible. Add any aliases you wish; it keeps looking up
# until it finds no more translations to do. Any input
# will be lowercased before checking.
# Alias table: maps lowercase generic/alternate family names to the
# canonical face names used as keys in _tt2ps_map below.
_family_alias = {
    'serif': 'times',
    'sansserif': 'helvetica',
    'monospaced': 'courier',
    'arial': 'helvetica'
    }
#maps a piddle font to a postscript one.
_tt2ps_map = {
#face, bold, italic -> ps name
('times', 0, 0) :'Times-Roman',
('times', 1, 0) :'Times-Bold',
('times', 0, 1) :'Times-Italic',
('times', 1, 1) :'Times-BoldItalic',
('courier', 0, 0) :'Courier',
('courier', 1, 0) :'Courier-Bold',
('courier', 0, 1) :'Courier-Oblique',
('courier', 1, 1) :'Courier-BoldOblique',
('helvetica', 0, 0) :'Helvetica',
('helvetica', 1, 0) :'Helvetica-Bold',
('helvetica', 0, 1) :'Helvetica-Oblique',
('helvetica', 1, 1) :'Helvetica-BoldOblique',
# there is only one Symbol font
('symbol', 0, 0) :'Symbol',
('symbol', 1, 0) :'Symbol',
('symbol', 0, 1) :'Symbol',
('symbol', 1, 1) :'Symbol',
# ditto for dingbats
('zapfdingbats', 0, 0) :'ZapfDingbats',
('zapfdingbats', 1, 0) :'ZapfDingbats',
('zapfdingbats', 0, 1) :'ZapfDingbats',
('zapfdingbats', 1, 1) :'ZapfDingbats',
}
_ps2tt_map={}
for k,v in _tt2ps_map.items():
if not _ps2tt_map.has_key(k):
_ps2tt_map[v.lower()] = k
def ps2tt(psfn):
    """Map a PostScript font name to a (family, bold, italic) tuple.

    Raises ValueError for an unknown name.  Uses ``in`` and the
    ``raise Exc(msg)`` call form so the module works under both
    Python 2 and Python 3 (``has_key`` and ``raise Exc, msg`` are
    Python-2-only).
    """
    psfn = psfn.lower()
    if psfn in _ps2tt_map:
        return _ps2tt_map[psfn]
    raise ValueError("Can't map determine family/bold/italic for %s" % psfn)
def tt2ps(fn, b, i):
    """Map (family name, bold flag, italic flag) to a PostScript font name.

    If the direct lookup fails, *fn* is treated as a PostScript name
    itself: its intrinsic bold/italic bits are OR-ed with the requested
    ones and the lookup is retried.  Raises ValueError when no concrete
    font exists.  (``has_key``/``raise Exc, msg`` replaced with their
    Python-2-and-3 compatible forms.)
    """
    K = (fn.lower(), b, i)
    if K in _tt2ps_map:
        return _tt2ps_map[K]
    # fn may itself be a PS name (e.g. 'Times-Bold'); combine its style
    # bits with the requested ones and retry.
    fn, b1, i1 = ps2tt(K[0])
    K = fn, b1 | b, i1 | i
    if K in _tt2ps_map:
        return _tt2ps_map[K]
    raise ValueError("Can't find concrete font for family=%s, bold=%d, italic=%d" % (fn, b, i))
def addMapping(face, bold, italic, psname):
    """Register a custom face/style <-> PostScript-name mapping in both
    direction tables."""
    style_key = (face.lower(), bold, italic)
    _tt2ps_map[style_key] = psname
    _ps2tt_map[psname.lower()] = style_key
| bsd-3-clause |
ahu-odoo/odoo | addons/auth_openid/res_users.py | 163 | 3778 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.modules.registry import RegistryManager
from openerp.osv import osv, fields
import openerp.exceptions
from openerp import tools
import utils
class res_users(osv.osv):
    """Extend res.users with OpenID credentials and an OpenID-key login
    fallback used by the auth_openid controllers."""
    _inherit = 'res.users'

    # TODO create helper fields for autofill openid_url and openid_email -> http://pad.openerp.com/web-openid
    _columns = {
        'openid_url': fields.char('OpenID URL', size=1024, copy=False),
        'openid_email': fields.char('OpenID Email', size=256, copy=False,
            help="Used for disambiguation in case of a shared OpenID URL"),
        'openid_key': fields.char('OpenID Key', size=utils.KEY_LENGTH,
            readonly=True, copy=False),
    }

    def _check_openid_url_email(self, cr, uid, ids, context=None):
        # Constraint body: every active user with an openid_url must be the
        # only active user with its (openid_url, openid_email) pair.
        return all(self.search_count(cr, uid, [('active', '=', True), ('openid_url', '=', u.openid_url), ('openid_email', '=', u.openid_email)]) == 1 \
            for u in self.browse(cr, uid, ids, context) if u.active and u.openid_url)

    def _check_openid_url_email_msg(self, cr, uid, ids, context):
        # Message shown when the constraint above is violated.
        return "There is already an active user with this OpenID Email for this OpenID URL"

    _constraints = [
        (_check_openid_url_email, lambda self, *a, **kw: self._check_openid_url_email_msg(*a, **kw), ['active', 'openid_url', 'openid_email']),
    ]

    def _login(self, db, login, password):
        # Try the normal password login first; on failure, treat the
        # supplied password as an OpenID key and match it directly in SQL,
        # updating login_date in the same statement.
        result = super(res_users, self)._login(db, login, password)
        if result:
            return result
        else:
            with RegistryManager.get(db).cursor() as cr:
                cr.execute("""UPDATE res_users
                                SET login_date=now() AT TIME ZONE 'UTC'
                                WHERE login=%s AND openid_key=%s AND active=%s RETURNING id""",
                           (tools.ustr(login), tools.ustr(password), True))
                # beware: record cache may be invalid
                res = cr.fetchone()
                cr.commit()
                return res[0] if res else False

    def check(self, db, uid, passwd):
        # Credential re-check for an existing session: fall back to the
        # OpenID key when the regular check raises AccessDenied.
        try:
            return super(res_users, self).check(db, uid, passwd)
        except openerp.exceptions.AccessDenied:
            if not passwd:
                raise
            with RegistryManager.get(db).cursor() as cr:
                cr.execute('''SELECT COUNT(1)
                              FROM res_users
                              WHERE id=%s
                                AND openid_key=%s
                                AND active=%s''',
                           (int(uid), passwd, True))
                if not cr.fetchone()[0]:
                    raise
            # Remember the validated credential so later checks skip the DB.
            self._uid_cache.setdefault(db, {})[uid] = passwd
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tortib/nzbToMedia | libs/unidecode/x00a.py | 252 | 4121 | data = (
'[?]', # 0x00
'[?]', # 0x01
'N', # 0x02
'[?]', # 0x03
'[?]', # 0x04
'a', # 0x05
'aa', # 0x06
'i', # 0x07
'ii', # 0x08
'u', # 0x09
'uu', # 0x0a
'[?]', # 0x0b
'[?]', # 0x0c
'[?]', # 0x0d
'[?]', # 0x0e
'ee', # 0x0f
'ai', # 0x10
'[?]', # 0x11
'[?]', # 0x12
'oo', # 0x13
'au', # 0x14
'k', # 0x15
'kh', # 0x16
'g', # 0x17
'gh', # 0x18
'ng', # 0x19
'c', # 0x1a
'ch', # 0x1b
'j', # 0x1c
'jh', # 0x1d
'ny', # 0x1e
'tt', # 0x1f
'tth', # 0x20
'dd', # 0x21
'ddh', # 0x22
'nn', # 0x23
't', # 0x24
'th', # 0x25
'd', # 0x26
'dh', # 0x27
'n', # 0x28
'[?]', # 0x29
'p', # 0x2a
'ph', # 0x2b
'b', # 0x2c
'bb', # 0x2d
'm', # 0x2e
'y', # 0x2f
'r', # 0x30
'[?]', # 0x31
'l', # 0x32
'll', # 0x33
'[?]', # 0x34
'v', # 0x35
'sh', # 0x36
'[?]', # 0x37
's', # 0x38
'h', # 0x39
'[?]', # 0x3a
'[?]', # 0x3b
'\'', # 0x3c
'[?]', # 0x3d
'aa', # 0x3e
'i', # 0x3f
'ii', # 0x40
'u', # 0x41
'uu', # 0x42
'[?]', # 0x43
'[?]', # 0x44
'[?]', # 0x45
'[?]', # 0x46
'ee', # 0x47
'ai', # 0x48
'[?]', # 0x49
'[?]', # 0x4a
'oo', # 0x4b
'au', # 0x4c
'', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'[?]', # 0x55
'[?]', # 0x56
'[?]', # 0x57
'[?]', # 0x58
'khh', # 0x59
'ghh', # 0x5a
'z', # 0x5b
'rr', # 0x5c
'[?]', # 0x5d
'f', # 0x5e
'[?]', # 0x5f
'[?]', # 0x60
'[?]', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'0', # 0x66
'1', # 0x67
'2', # 0x68
'3', # 0x69
'4', # 0x6a
'5', # 0x6b
'6', # 0x6c
'7', # 0x6d
'8', # 0x6e
'9', # 0x6f
'N', # 0x70
'H', # 0x71
'', # 0x72
'', # 0x73
'G.E.O.', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'N', # 0x81
'N', # 0x82
'H', # 0x83
'[?]', # 0x84
'a', # 0x85
'aa', # 0x86
'i', # 0x87
'ii', # 0x88
'u', # 0x89
'uu', # 0x8a
'R', # 0x8b
'[?]', # 0x8c
'eN', # 0x8d
'[?]', # 0x8e
'e', # 0x8f
'ai', # 0x90
'oN', # 0x91
'[?]', # 0x92
'o', # 0x93
'au', # 0x94
'k', # 0x95
'kh', # 0x96
'g', # 0x97
'gh', # 0x98
'ng', # 0x99
'c', # 0x9a
'ch', # 0x9b
'j', # 0x9c
'jh', # 0x9d
'ny', # 0x9e
'tt', # 0x9f
'tth', # 0xa0
'dd', # 0xa1
'ddh', # 0xa2
'nn', # 0xa3
't', # 0xa4
'th', # 0xa5
'd', # 0xa6
'dh', # 0xa7
'n', # 0xa8
'[?]', # 0xa9
'p', # 0xaa
'ph', # 0xab
'b', # 0xac
'bh', # 0xad
'm', # 0xae
'ya', # 0xaf
'r', # 0xb0
'[?]', # 0xb1
'l', # 0xb2
'll', # 0xb3
'[?]', # 0xb4
'v', # 0xb5
'sh', # 0xb6
'ss', # 0xb7
's', # 0xb8
'h', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'\'', # 0xbc
'\'', # 0xbd
'aa', # 0xbe
'i', # 0xbf
'ii', # 0xc0
'u', # 0xc1
'uu', # 0xc2
'R', # 0xc3
'RR', # 0xc4
'eN', # 0xc5
'[?]', # 0xc6
'e', # 0xc7
'ai', # 0xc8
'oN', # 0xc9
'[?]', # 0xca
'o', # 0xcb
'au', # 0xcc
'', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'AUM', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'RR', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'0', # 0xe6
'1', # 0xe7
'2', # 0xe8
'3', # 0xe9
'4', # 0xea
'5', # 0xeb
'6', # 0xec
'7', # 0xed
'8', # 0xee
'9', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| gpl-3.0 |
mdblv2/joatu-django | application/site-packages/django/contrib/contenttypes/models.py | 100 | 6942 | from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_text, force_text
from django.utils.encoding import python_2_unicode_compatible
class ContentTypeManager(models.Manager):
    """Manager for ContentType with an in-process cache of lookups by
    model, primary key and natural key."""

    # Cache to avoid re-looking up ContentType objects all over the place.
    # This cache is shared by all the get_for_* methods.
    _cache = {}

    def get_by_natural_key(self, app_label, model):
        """Return the ContentType for (app_label, model), consulting and
        populating the shared cache."""
        try:
            ct = self.__class__._cache[self.db][(app_label, model)]
        except KeyError:
            ct = self.get(app_label=app_label, model=model)
            self._add_to_cache(self.db, ct)
        return ct

    def _get_opts(self, model, for_concrete_model):
        # Resolve proxy/deferred classes to the Options object used as the
        # cache key basis.
        if for_concrete_model:
            model = model._meta.concrete_model
        elif model._deferred:
            model = model._meta.proxy_for_model
        return model._meta

    def _get_from_cache(self, opts):
        # Raises KeyError when the entry is not cached for this database.
        key = (opts.app_label, opts.object_name.lower())
        return self.__class__._cache[self.db][key]

    def get_for_model(self, model, for_concrete_model=True):
        """
        Returns the ContentType object for a given model, creating the
        ContentType if necessary. Lookups are cached so that subsequent lookups
        for the same model don't hit the database.
        """
        opts = self._get_opts(model, for_concrete_model)
        try:
            ct = self._get_from_cache(opts)
        except KeyError:
            # Load or create the ContentType entry. The smart_text() is
            # needed around opts.verbose_name_raw because name_raw might be a
            # django.utils.functional.__proxy__ object.
            ct, created = self.get_or_create(
                app_label = opts.app_label,
                model = opts.object_name.lower(),
                defaults = {'name': smart_text(opts.verbose_name_raw)},
            )
            self._add_to_cache(self.db, ct)
        return ct

    def get_for_models(self, *models, **kwargs):
        """
        Given *models, returns a dictionary mapping {model: content_type}.
        """
        for_concrete_models = kwargs.pop('for_concrete_models', True)
        # Final results
        results = {}
        # models that aren't already in the cache
        needed_app_labels = set()
        needed_models = set()
        needed_opts = set()
        for model in models:
            opts = self._get_opts(model, for_concrete_models)
            try:
                ct = self._get_from_cache(opts)
            except KeyError:
                needed_app_labels.add(opts.app_label)
                needed_models.add(opts.object_name.lower())
                needed_opts.add(opts)
            else:
                results[model] = ct
        if needed_opts:
            # Single query for everything that missed the cache.
            cts = self.filter(
                app_label__in=needed_app_labels,
                model__in=needed_models
            )
            for ct in cts:
                model = ct.model_class()
                if model._meta in needed_opts:
                    results[model] = ct
                    needed_opts.remove(model._meta)
                self._add_to_cache(self.db, ct)
        for opts in needed_opts:
            # These weren't in the cache, or the DB, create them.
            ct = self.create(
                app_label=opts.app_label,
                model=opts.object_name.lower(),
                name=smart_text(opts.verbose_name_raw),
            )
            self._add_to_cache(self.db, ct)
            results[ct.model_class()] = ct
        return results

    def get_for_id(self, id):
        """
        Lookup a ContentType by ID. Uses the same shared cache as get_for_model
        (though ContentTypes are obviously not created on-the-fly by get_by_id).
        """
        try:
            ct = self.__class__._cache[self.db][id]
        except KeyError:
            # This could raise a DoesNotExist; that's correct behavior and will
            # make sure that only correct ctypes get stored in the cache dict.
            ct = self.get(pk=id)
            self._add_to_cache(self.db, ct)
        return ct

    def clear_cache(self):
        """
        Clear out the content-type cache. This needs to happen during database
        flushes to prevent caching of "stale" content type IDs (see
        django.contrib.contenttypes.management.update_contenttypes for where
        this gets called).
        """
        self.__class__._cache.clear()

    def _add_to_cache(self, using, ct):
        """Insert a ContentType into the cache."""
        # Cached under both the (app_label, model) tuple and the primary key.
        model = ct.model_class()
        key = (model._meta.app_label, model._meta.object_name.lower())
        self.__class__._cache.setdefault(using, {})[key] = ct
        self.__class__._cache.setdefault(using, {})[ct.id] = ct
@python_2_unicode_compatible
class ContentType(models.Model):
    """One row per installed model: (app_label, model) plus a legacy name."""
    # ``name`` is retained only for backwards compatibility; see __str__.
    name = models.CharField(max_length=100)
    app_label = models.CharField(max_length=100)
    model = models.CharField(_('python model class name'), max_length=100)
    objects = ContentTypeManager()

    class Meta:
        verbose_name = _('content type')
        verbose_name_plural = _('content types')
        db_table = 'django_content_type'
        ordering = ('name',)
        unique_together = (('app_label', 'model'),)

    def __str__(self):
        # self.name is deprecated in favor of using model's verbose_name, which
        # can be translated. Formal deprecation is delayed until we have DB
        # migration to be able to remove the field from the database along with
        # the attribute.
        #
        # We return self.name only when users have changed its value from the
        # initial verbose_name_raw and might rely on it.
        model = self.model_class()
        if not model or self.name != model._meta.verbose_name_raw:
            return self.name
        else:
            return force_text(model._meta.verbose_name)

    def model_class(self):
        "Returns the Python model class for this type of content."
        # Local import avoids issues at module-load time.
        from django.db import models
        return models.get_model(self.app_label, self.model,
                                only_installed=False)

    def get_object_for_this_type(self, **kwargs):
        """
        Returns an object of this type for the keyword arguments given.
        Basically, this is a proxy around this object_type's get_object() model
        method. The ObjectNotExist exception, if thrown, will not be caught,
        so code that calls this method should catch it.
        """
        return self.model_class()._base_manager.using(self._state.db).get(**kwargs)

    def get_all_objects_for_this_type(self, **kwargs):
        """
        Returns all objects of this type for the keyword arguments given.
        """
        return self.model_class()._base_manager.using(self._state.db).filter(**kwargs)

    def natural_key(self):
        # Used by serialization to reference content types portably.
        return (self.app_label, self.model)
| apache-2.0 |
fajoy/nova | nova/virt/libvirt/firewall.py | 3 | 10532 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet import tpool
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
import nova.virt.firewall as base_firewall
LOG = logging.getLogger(__name__)

CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.config')
CONF.import_opt('vpn_image_id', 'nova.config')

# libvirt is imported optionally: log a warning instead of failing at
# import time when the module is unavailable.
try:
    import libvirt
except ImportError:
    LOG.warn(_("Libvirt module could not be loaded. NWFilterFirewall will "
               "not work correctly."))
class NWFilterFirewall(base_firewall.FirewallDriver):
    """
    This class implements a network filtering mechanism by using
    libvirt's nwfilter.
    all instances get a filter ("nova-base") applied. This filter
    provides some basic security such as protection against MAC
    spoofing, IP spoofing, and ARP spoofing.
    """

    def __init__(self, virtapi, get_connection, **kwargs):
        super(NWFilterFirewall, self).__init__(virtapi)
        # get_connection is a callable so the libvirt connection is
        # re-resolved on every use (see _get_connection/_conn).
        self._libvirt_get_connection = get_connection
        self.static_filters_configured = False
        self.handle_security_groups = False

    def apply_instance_filter(self, instance, network_info):
        """No-op. Everything is done in prepare_instance_filter"""
        pass

    def _get_connection(self):
        return self._libvirt_get_connection()
    _conn = property(_get_connection)

    @staticmethod
    def nova_no_nd_reflection_filter():
        """
        This filter protects false positives on IPv6 Duplicate Address
        Detection(DAD).
        """
        return '''<filter name='nova-no-nd-reflection' chain='ipv6'>
                  <!-- no nd reflection -->
                  <!-- drop if destination mac is v6 mcast mac addr and
                       we sent it. -->
                  <rule action='drop' direction='in'>
                      <mac dstmacaddr='33:33:00:00:00:00'
                           dstmacmask='ff:ff:00:00:00:00' srcmacaddr='$MAC'/>
                  </rule>
                  </filter>'''

    @staticmethod
    def nova_dhcp_filter():
        """The standard allow-dhcp-server filter is an <ip> one, so it uses
        ebtables to allow traffic through. Without a corresponding rule in
        iptables, it'll get blocked anyway."""
        return '''<filter name='nova-allow-dhcp-server' chain='ipv4'>
                    <uuid>891e4787-e5c0-d59b-cbd6-41bc3c6b36fc</uuid>
                    <rule action='accept' direction='out'
                          priority='100'>
                      <udp srcipaddr='0.0.0.0'
                           dstipaddr='255.255.255.255'
                           srcportstart='68'
                           dstportstart='67'/>
                    </rule>
                    <rule action='accept' direction='in'
                          priority='100'>
                      <udp srcipaddr='$DHCPSERVER'
                           srcportstart='67'
                           dstportstart='68'/>
                    </rule>
                  </filter>'''

    def setup_basic_filtering(self, instance, network_info):
        """Set up basic filtering (MAC, IP, and ARP spoofing protection)"""
        LOG.info(_('Called setup_basic_filtering in nwfilter'),
                 instance=instance)

        if self.handle_security_groups:
            # No point in setting up a filter set that we'll be overriding
            # anyway.
            return

        LOG.info(_('Ensuring static filters'), instance=instance)
        self._ensure_static_filters()

        # Pick the base filter: VPN images get DHCP-only; otherwise the
        # choice depends on whether any mapping has a DHCP server.
        allow_dhcp = False
        for (network, mapping) in network_info:
            if mapping['dhcp_server']:
                allow_dhcp = True
                break

        if instance['image_ref'] == str(CONF.vpn_image_id):
            base_filter = 'nova-vpn'
        elif allow_dhcp:
            base_filter = 'nova-base'
        else:
            base_filter = 'nova-nodhcp'

        # One per-NIC filter, named after the instance plus the MAC.
        for (network, mapping) in network_info:
            nic_id = mapping['mac'].replace(':', '')
            instance_filter_name = self._instance_filter_name(instance, nic_id)
            self._define_filter(self._filter_container(instance_filter_name,
                                                       [base_filter]))

    def _ensure_static_filters(self):
        """Static filters are filters that have no need to be IP aware.

        There is no configuration or tuneability of these filters, so they
        can be set up once and forgotten about.

        """
        if self.static_filters_configured:
            return

        filter_set = ['no-mac-spoofing',
                      'no-ip-spoofing',
                      'no-arp-spoofing']
        if CONF.use_ipv6:
            # Passed as a callable; _define_filter calls it to get the XML.
            self._define_filter(self.nova_no_nd_reflection_filter)
            filter_set.append('nova-no-nd-reflection')
        self._define_filter(self._filter_container('nova-nodhcp', filter_set))
        filter_set.append('allow-dhcp-server')
        self._define_filter(self._filter_container('nova-base', filter_set))
        self._define_filter(self._filter_container('nova-vpn',
                                                   ['allow-dhcp-server']))
        self._define_filter(self.nova_dhcp_filter)

        self.static_filters_configured = True

    def _filter_container(self, name, filters):
        # Wrap a list of existing filter names in a root-chain filter.
        xml = '''<filter name='%s' chain='root'>%s</filter>''' % (
                 name,
                 ''.join(["<filterref filter='%s'/>" % (f,) for f in filters]))
        return xml

    def _define_filter(self, xml):
        # Accepts either the XML string or a callable producing it.
        if callable(xml):
            xml = xml()
        # execute in a native thread and block current greenthread until done
        if not CONF.libvirt_nonblocking:
            # NOTE(maoy): the original implementation is to have the API called
            # in the thread pool no matter what.
            tpool.execute(self._conn.nwfilterDefineXML, xml)
        else:
            # NOTE(maoy): self._conn is an eventlet.tpool.Proxy object
            self._conn.nwfilterDefineXML(xml)

    def unfilter_instance(self, instance, network_info):
        """Clear out the nwfilter rules."""
        instance_name = instance['name']
        for (network, mapping) in network_info:
            nic_id = mapping['mac'].replace(':', '')
            instance_filter_name = self._instance_filter_name(instance, nic_id)

            try:
                _nw = self._conn.nwfilterLookupByName(instance_filter_name)
                _nw.undefine()
            except libvirt.libvirtError as e:
                errcode = e.get_error_code()
                if errcode == libvirt.VIR_ERR_OPERATION_INVALID:
                    # This happens when the instance filter is still in
                    # use (ie. when the instance has not terminated properly)
                    raise
                # Filter already gone: only log it.
                LOG.debug(_('The nwfilter(%(instance_filter_name)s) '
                            'is not found.') % locals(),
                          instance=instance)

    def _define_filters(self, filter_name, filter_children):
        # Convenience wrapper: build the container XML and define it.
        self._define_filter(self._filter_container(filter_name,
                                                   filter_children))

    @staticmethod
    def _instance_filter_name(instance, nic_id=None):
        # Per-instance (and optionally per-NIC) libvirt filter name.
        if not nic_id:
            return 'nova-instance-%s' % (instance['name'])
        return 'nova-instance-%s-%s' % (instance['name'], nic_id)

    def instance_filter_exists(self, instance, network_info):
        """Check nova-instance-instance-xxx exists"""
        for (network, mapping) in network_info:
            nic_id = mapping['mac'].replace(':', '')
            instance_filter_name = self._instance_filter_name(instance, nic_id)
            try:
                self._conn.nwfilterLookupByName(instance_filter_name)
            except libvirt.libvirtError:
                name = instance['name']
                LOG.debug(_('The nwfilter(%(instance_filter_name)s) for'
                            '%(name)s is not found.') % locals(),
                          instance=instance)
                return False
        return True
class IptablesFirewallDriver(base_firewall.IptablesFirewallDriver):
    """iptables-based driver that also maintains the basic NWFilter set."""

    def __init__(self, virtapi, execute=None, **kwargs):
        super(IptablesFirewallDriver, self).__init__(virtapi, **kwargs)
        # Composed NWFilterFirewall handles the libvirt-level base filters.
        self.nwfilter = NWFilterFirewall(virtapi, kwargs['get_connection'])

    def setup_basic_filtering(self, instance, network_info):
        """Set up provider rules and basic NWFilter."""
        self.nwfilter.setup_basic_filtering(instance, network_info)
        if not self.basicly_filtered:
            LOG.debug(_('iptables firewall: Setup Basic Filtering'),
                      instance=instance)
            self.refresh_provider_fw_rules()
            self.basicly_filtered = True

    def apply_instance_filter(self, instance, network_info):
        """No-op. Everything is done in prepare_instance_filter"""
        pass

    def unfilter_instance(self, instance, network_info):
        # NOTE(salvatore-orlando):
        # Overriding base class method for applying nwfilter operation
        if self.instances.pop(instance['id'], None):
            # NOTE(vish): use the passed info instead of the stored info
            self.network_infos.pop(instance['id'])
            self.remove_filters_for_instance(instance)
            self.iptables.apply()
            self.nwfilter.unfilter_instance(instance, network_info)
        else:
            LOG.info(_('Attempted to unfilter instance which is not '
                       'filtered'), instance=instance)

    def instance_filter_exists(self, instance, network_info):
        """Check nova-instance-instance-xxx exists"""
        return self.nwfilter.instance_filter_exists(instance, network_info)
| apache-2.0 |
JakeLowey/HackRPI2 | django/contrib/gis/gdal/geomtype.py | 404 | 2967 | from django.contrib.gis.gdal.error import OGRException
#### OGRGeomType ####
class OGRGeomType(object):
    "Encapulates OGR Geometry Types."
    # 2.5D (z-coordinate) geometry codes have this bit set.
    wkb25bit = -2147483648

    # Dictionary of acceptable OGRwkbGeometryType s and their string names.
    _types = {0 : 'Unknown',
              1 : 'Point',
              2 : 'LineString',
              3 : 'Polygon',
              4 : 'MultiPoint',
              5 : 'MultiLineString',
              6 : 'MultiPolygon',
              7 : 'GeometryCollection',
              100 : 'None',
              101 : 'LinearRing',
              1 + wkb25bit: 'Point25D',
              2 + wkb25bit: 'LineString25D',
              3 + wkb25bit: 'Polygon25D',
              4 + wkb25bit: 'MultiPoint25D',
              5 + wkb25bit : 'MultiLineString25D',
              6 + wkb25bit : 'MultiPolygon25D',
              7 + wkb25bit : 'GeometryCollection25D',
              }
    # Reverse type dictionary, keyed by lower-case of the name.
    _str_types = dict([(v.lower(), k) for k, v in _types.items()])

    def __init__(self, type_input):
        "Figures out the correct OGR Type based upon the input."
        # Accepts another OGRGeomType, a type-name string, or an integer code.
        # NOTE(review): ``basestring`` makes this module Python-2-only.
        if isinstance(type_input, OGRGeomType):
            num = type_input.num
        elif isinstance(type_input, basestring):
            type_input = type_input.lower()
            if type_input == 'geometry': type_input='unknown'
            num = self._str_types.get(type_input, None)
            if num is None:
                raise OGRException('Invalid OGR String Type "%s"' % type_input)
        elif isinstance(type_input, int):
            if not type_input in self._types:
                raise OGRException('Invalid OGR Integer Type: %d' % type_input)
            num = type_input
        else:
            raise TypeError('Invalid OGR input type given.')

        # Setting the OGR geometry type number.
        self.num = num

    def __str__(self):
        "Returns the value of the name property."
        return self.name

    def __eq__(self, other):
        """
        Does an equivalence test on the OGR type with the given
        other OGRGeomType, the short-hand string, or the integer.
        """
        # NOTE(review): __eq__ without __hash__ relies on Python 2's default
        # hashing; under Python 3 instances would become unhashable.
        if isinstance(other, OGRGeomType):
            return self.num == other.num
        elif isinstance(other, basestring):
            return self.name.lower() == other.lower()
        elif isinstance(other, int):
            return self.num == other
        else:
            return False

    def __ne__(self, other):
        return not (self == other)

    @property
    def name(self):
        "Returns a short-hand string form of the OGR Geometry type."
        return self._types[self.num]

    @property
    def django(self):
        "Returns the Django GeometryField for this OGR Type."
        s = self.name.replace('25D', '')
        if s in ('LinearRing', 'None'):
            # No Django field corresponds to these OGR types.
            return None
        elif s == 'Unknown':
            s = 'Geometry'
        return s + 'Field'
| mit |
liwushuo/Flask-CacheOBJ | tests.py | 2 | 2939 | # -*- coding: utf-8 -*-
import pytest
import fakeredis
from flask import Flask
from flask.ext.cacheobj import FlaskCacheOBJ, Msgpackable
from flask.ext.cacheobj import set_counter, get_counter, dec_counter, inc_counter
# NOTE(review): these module-level objects share names with the pytest
# fixtures below and are shadowed inside every test that requests those
# fixtures; at import time they only exercise init_app.
app = Flask(__name__)
cache = FlaskCacheOBJ()
cache.init_app(app)
@pytest.fixture
def app(request):
    """Flask app with an application context pushed for the test's duration."""
    app = Flask(__name__)
    ctx = app.app_context()
    ctx.push()
    request.addfinalizer(ctx.pop)
    return app


@pytest.fixture
def cache(app, request):
    """FlaskCacheOBJ backed by an in-memory fakeredis, flushed afterwards."""
    cache = FlaskCacheOBJ()
    app.config['CACHE_HOST'] = 'localhost'
    cache.init_app(app)
    # Swap the real redis client for fakeredis so tests need no server.
    cache.mc = fakeredis.FakeStrictRedis()
    request.addfinalizer(cache.mc.flushall)
    return cache
def test_mc_initialized(cache):
    # The fixture must leave a usable client behind.
    assert cache.mc


class Obj(Msgpackable):
    """Minimal msgpack-serializable object used as a cache payload."""
    _msgpack_key = 'CacheOBJ'

    def __init__(self, id):
        self.id = id
def test_cache_obj(cache):
    @cache.obj({'key': 'test_cache_obj:{id}', 'expire': 1})
    def get(id):
        return Obj(id)
    assert get.cache_key_reg
    assert not cache.mc.get('test_cache_obj:1')   # cold cache
    assert get(1)                                 # miss populates the key
    assert cache.mc.get('test_cache_obj:1')
    assert get(1)                                 # subsequent call is a hit


def test_cache_list(cache):
    @cache.list({'key': 'test_cache_list:{id}', 'expire': 1})
    def get(id):
        return range(id)
    assert get.cache_key_reg
    assert not cache.mc.smembers('test_cache_list:1')
    assert get(1)
    assert cache.mc.exists('test_cache_list:1')
    assert get(1)


def test_cache_hash(cache):
    @cache.hash({'key': '{id}', 'hash_key': 'item', 'expire': 1})
    def get(id):
        # NOTE(review): returns Obj(1) regardless of id; harmless while the
        # test only calls get(1), but Obj(id) was probably intended.
        return Obj(1)
    assert get.cache_key_reg
    assert not cache.mc.hget('item', '1')
    assert get(1)
    assert cache.mc.hget('item', '1')
    assert get(1)
def test_cache_counter(cache):
    @cache.counter({'key': 'test_cache_counter:{id}', 'expire': 1})
    def get(id):
        return int(id)
    assert not cache.mc.get('test_cache_counter:1')
    assert get(1)
    assert int(cache.mc.get('test_cache_counter:1'))
    assert get(1)


def test_cache_delete(cache):
    @cache.counter({'key': 'test_cache_counter:{id}', 'expire': 1})
    def get(id):
        return int(id)

    @cache.delete({'key': 'test_cache_counter:{id}'})
    def update(id):
        return int(id)

    get(1)  # populate the counter first
    assert get.cache_key_reg
    assert int(cache.mc.get('test_cache_counter:1'))
    assert update(1)                                # delete-decorated call removes the key
    assert not cache.mc.get('test_cache_counter:1')
# Direct exercises of the module-level counter helpers (set/inc/dec/get).
def test_set_counter(cache):
    pattern = dict(key='test_set_counter:{id}')
    set_counter(pattern, 0, id=1)
    assert get_counter(pattern, id=1) == 0


def test_inc_counter(cache):
    pattern = dict(key='test_inc_counter:{id}')
    set_counter(pattern, 0, id=1)
    inc_counter(pattern, delta=10, id=1)
    assert get_counter(pattern, id=1) == 10


def test_dec_counter(cache):
    pattern = dict(key='test_dec_counter:{id}')
    set_counter(pattern, 10, id=1)
    dec_counter(pattern, delta=10, id=1)
    assert get_counter(pattern, id=1) == 0
| mit |
OspreyX/trading-with-python | cookbook/reconstructVXX/reconstructVXX.py | 77 | 3574 | # -*- coding: utf-8 -*-
"""
Reconstructing VXX from futures data
author: Jev Kuznetsov
License : BSD
"""
from __future__ import division
from pandas import *
import numpy as np
import os
class Future(object):
    """VIX future contract: wraps its price series plus roll bookkeeping.

    Used to keep data structures simple.
    """

    def __init__(self, series, code=None):
        """code is optional, example '2010_01'"""
        self.series = series.dropna()  # price data with NaNs stripped
        self.settleDate = self.series.index[-1]
        self.dt = len(self.series)  # roll period (default, should be recalculated)
        self.code = code  # string code 'YYYY_MM'

    def monthNr(self):
        """get month nr from the future code"""
        return int(self.code.split('_')[1])

    def dr(self, date):
        """days remaining before settlement, on a given date"""
        return sum(self.series.index > date)

    def price(self, date):
        """price on a date"""
        # FIX: Series.get_value() was deprecated and removed from pandas
        # (gone as of 1.0); .loc is the supported single-label lookup.
        return self.series.loc[date]
def returns(df):
    """Simple daily return: p_t / p_{t-1} - 1 (first element is NaN)."""
    previous = df.shift(1)
    return df / previous - 1
def recounstructVXX():
    """
    calculate VXX returns
    needs a previously preprocessed file vix_futures.csv
    """
    # NOTE(review): the function name carries a typo ('recounstruct'); it is
    # kept because the __main__ block calls it by this exact name.
    dataDir = os.path.expanduser('~')+'/twpData'
    # NOTE(review): DataFrame.from_csv and (below) DataFrame.set_value are
    # long deprecated/removed in modern pandas; this module targets the
    # pandas of its era.
    X = DataFrame.from_csv(dataDir+'/vix_futures.csv')   # raw data table

    # build end dates list & futures classes
    futures = []
    codes = X.columns
    endDates = []
    for code in codes:
        f = Future(X[code],code=code)
        print code,':', f.settleDate
        endDates.append(f.settleDate)
        futures.append(f)
    endDates = np.array(endDates)

    # set roll period of each future
    for i in range(1,len(futures)):
        futures[i].dt = futures[i].dr(futures[i-1].settleDate)

    # Y is the result table
    idx = X.index
    Y = DataFrame(index=idx, columns=['first','second','days_left','w1','w2',
                                      'ret','30days_avg'])

    # W is the weight matrix
    W = DataFrame(data = np.zeros(X.values.shape),index=idx,columns = X.columns)

    # for VXX calculation see http://www.ipathetn.com/static/pdf/vix-prospectus.pdf
    # page PS-20
    for date in idx:
        i =np.nonzero(endDates>=date)[0][0] # find first not exprired future
        first = futures[i] # first month futures class
        second = futures[i+1] # second month futures class

        dr = first.dr(date) # number of remaining dates in the first futures contract
        dt = first.dt #number of business days in roll period

        # Linear roll weights between the front and second contract.
        W.set_value(date,codes[i],100*dr/dt)
        W.set_value(date,codes[i+1],100*(dt-dr)/dt)

        # this is all just debug info
        p1 = first.price(date)
        p2 = second.price(date)
        w1 = 100*dr/dt
        w2 = 100*(dt-dr)/dt

        Y.set_value(date,'first',p1)
        Y.set_value(date,'second',p2)
        Y.set_value(date,'days_left',first.dr(date))
        Y.set_value(date,'w1',w1)
        Y.set_value(date,'w2',w2)
        Y.set_value(date,'30days_avg',(p1*w1+p2*w2)/100)

    # Portfolio value today vs yesterday under yesterday's weights.
    valCurr = (X*W.shift(1)).sum(axis=1) # value on day N
    valYest = (X.shift(1)*W.shift(1)).sum(axis=1) # value on day N-1
    Y['ret'] = valCurr/valYest-1 # index return on day N

    return Y
##-------------------Main script---------------------------
if __name__=="__main__":
    # Reconstruct the index, preview it, and persist next to the script.
    Y = recounstructVXX()
    print Y.head(30)#
    Y.to_csv('reconstructedVXX.csv')
| bsd-3-clause |
kevenli/scrapydd | scrapydd/ssl_gen.py | 1 | 15483 | from OpenSSL import crypto
from os import path, makedirs, remove
from datetime import datetime
import re
from shutil import copy
import sys
import os
# Module-level defaults. NOTE(review): SSLCertificateGenerator computes its
# own key_dir/index_file in __init__; these appear unused by the class --
# confirm before removing.
key_dir = 'keys'
index_file = os.path.join(key_dir, 'index.txt')
class SSLCertificateGenerator:
    """Creates and manages a private CA plus server/client certificates.

    All material is stored as PEM files inside ``self.key_dir`` together
    with an OpenSSL-style certificate database (``index.txt``) and a
    ``serial`` counter file.
    """

    key_dir = None      # directory holding all key/cert/CRL files
    index_file = None   # path of the certificate database (index.txt)
    serial = None       # next serial number to issue

    def __init__(self, key_dir=None):
        """Prepare ``key_dir`` (created if missing) and load the serial counter."""
        # Define key_dir
        if key_dir:
            key_dir = key_dir.replace('\\', '/')
            if not os.path.exists(key_dir):
                os.makedirs(key_dir)
            if not os.path.isdir(key_dir):
                raise Exception("Key Directory does not exist or is not a directory:" + key_dir)
        else:
            # Default to a "keys" directory next to the package.
            key_dir = path.join(path.dirname(path.realpath(__file__)), '..', 'keys')
            key_dir = key_dir.replace('\\', '/')
        self.key_dir = key_dir
        self.index_file = key_dir + '/index.txt'

        # Get serial number; a missing file means a fresh key dir, start at 1.
        try:
            serial_file = open(key_dir + '/serial', 'r')
            self.serial = int(serial_file.readline())
            serial_file.close()  # fixed: was `serial_file.close` (method never invoked)
        except IOError:
            self.serial = 1

    def _get_cert_dn(self, cert):
        """Return the subject of *cert* as a slash-separated DN string.

        NOTE(review): under Python 3 ``get_components()`` yields bytes, so
        this concatenation would raise TypeError -- confirm intended use.
        """
        dn = ''
        for label, value in cert.get_subject().get_components():
            dn += '/' + label + '=' + value
        return dn

    def _gen_key(self):
        """Generate and return a new 2048-bit RSA key pair."""
        key = crypto.PKey()
        key.generate_key(crypto.TYPE_RSA, 2048)
        return key

    def _create_csr(self, cert_name, key):
        """Build a SHA-256-signed certificate signing request for *cert_name*."""
        req = crypto.X509Req()
        req.get_subject().CN = cert_name
        req.set_pubkey(key)
        req.sign(key, "sha256")
        return req

    def _write_key_to_file(self, key, filepath):
        """Serialize a private key to *filepath* in PEM format."""
        key_file = open(filepath, 'w')
        key_file.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, key).decode("utf-8"))
        key_file.close()

    def _load_key_from_file(self, filepath):
        """Load and return a PEM private key from *filepath*."""
        key_file = open(filepath, 'r')
        key = crypto.load_privatekey(crypto.FILETYPE_PEM, key_file.read())
        key_file.close()
        return key

    def _write_cert_to_file(self, cert, filepath):
        """Serialize a certificate to *filepath* in PEM format."""
        cert_file = open(filepath, 'w')
        cert_file.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert).decode("utf-8"))
        cert_file.close()

    def _load_cert_from_file(self, filepath):
        """Load and return a PEM certificate from *filepath*."""
        cert_file = open(filepath, 'r')
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_file.read())
        cert_file.close()
        return cert

    def _write_csr_to_file(self, csr, filepath):
        """Serialize a certificate signing request to *filepath* in PEM format."""
        csr_file = open(filepath, 'w')
        csr_file.write(crypto.dump_certificate_request(crypto.FILETYPE_PEM, csr).decode("utf-8"))
        csr_file.close()

    def _load_csr_from_file(self, filepath):
        """Load and return a PEM certificate signing request from *filepath*."""
        csr_file = open(filepath, 'r')
        csr = crypto.load_certificate_request(crypto.FILETYPE_PEM, csr_file.read())
        csr_file.close()
        return csr

    def _write_pfx_to_file(self, pkcs12, filepath):
        """Export a PKCS12 structure to *filepath* (binary)."""
        pkcs12_file = open(filepath, 'wb')
        pkcs12_file.write(pkcs12.export())
        pkcs12_file.close()

    def _write_crl_to_file(self, crl, ca_cert, ca_key, filepath):
        """Sign *crl* with the CA and write it to *filepath* (valid 365 days)."""
        crl_file = open(filepath, 'w')
        crl_file.write(crl.export(ca_cert, ca_key, days=365).decode("utf-8"))
        crl_file.close()

    def _load_crl_from_file(self, filepath):
        """Load the CRL from *filepath*, or return a fresh empty CRL if absent."""
        try:
            crl_file = open(filepath, 'r')
            crl = crypto.load_crl(crypto.FILETYPE_PEM, crl_file.read())
            crl_file.close()
        except IOError:
            # Create new CRL file if it doesn't exist
            crl = crypto.CRL()
        return crl

    def _sign_csr(self, req, ca_key, ca_cert, cert_org=False, cert_ou=False, usage=3, days=3650, alt_names=None):
        """Sign *req* with the CA and record it in the certificate database.

        usage: 1 = intermediate CA, 2 = server (serverAuth), 3 = client
        (clientAuth).  Returns the signed ``crypto.X509`` certificate.
        """
        expiry_seconds = days * 86400

        # Create and sign certificate
        cert = crypto.X509()
        cert.set_version(2)  # X.509 v3
        cert.set_subject(req.get_subject())
        # Inherit O/OU from the CA unless explicitly overridden.
        if cert_org:
            cert.get_subject().O = cert_org
        else:
            cert.get_subject().O = ca_cert.get_subject().O
        if cert_ou:
            cert.get_subject().OU = cert_ou
        else:
            cert.get_subject().OU = ca_cert.get_subject().OU
        cert.set_serial_number(self.serial)
        cert.gmtime_adj_notBefore(0)
        cert.gmtime_adj_notAfter(expiry_seconds)
        cert.set_issuer(ca_cert.get_subject())
        cert.set_pubkey(req.get_pubkey())

        if usage == 1:
            cert.add_extensions([
                crypto.X509Extension(b"basicConstraints", True, b"CA:TRUE, pathlen:0"),
                crypto.X509Extension(b"keyUsage", True, b"keyCertSign, cRLSign"),
                crypto.X509Extension(b"subjectKeyIdentifier", False, b"hash", subject=cert)
            ])
        elif usage == 2:
            cert.add_extensions([
                crypto.X509Extension(b"extendedKeyUsage", True, b"serverAuth"),
            ])
        elif usage == 3:
            cert.add_extensions([
                crypto.X509Extension(b"extendedKeyUsage", True, b"clientAuth"),
            ])

        # Add all subject alternative names in a single extension.
        # Fixed: the extension was previously re-added once per name.
        if alt_names:
            cert.add_extensions([
                crypto.X509Extension(b"subjectAltName", False, b"DNS:" + ",DNS:".join(alt_names).encode("utf-8"))
            ])

        cert.sign(ca_key, "sha256")

        # Append the new certificate to index.txt ("V" = valid).
        # Fixed: paths below used the module-level key_dir instead of
        # self.key_dir, breaking instances built with a custom directory.
        db_line = "V\t" + cert.get_notBefore().decode("utf-8") + "\t\t" + hex(
            int(cert.get_serial_number())) + "\tunknown\t" + str(cert.get_subject())[18:-2] + "\n"
        index_file = open(self.key_dir + '/index.txt', 'a')
        index_file.write(db_line)
        index_file.close()

        # Persist and advance the serial counter.  Fixed: self.serial was
        # never incremented, so consecutive signings in one process reused
        # the same serial number.
        self.serial += 1
        serial_file = open(self.key_dir + '/serial', 'w')
        serial_file.write(str(self.serial))
        serial_file.close()

        return cert

    def gen_ca(self, cert_org="Thinkbox Software", cert_ou="IT", days=3650):
        """Generate a self-signed CA certificate/key pair as ca.crt / ca.key."""
        expiry_seconds = days * 86400

        # Generate key
        key = crypto.PKey()
        key.generate_key(crypto.TYPE_RSA, 2048)

        # Set up and sign CA certificate
        ca = crypto.X509()
        ca.set_version(2)
        ca.set_serial_number(1)
        ca.get_subject().CN = "CA"
        ca.get_subject().O = cert_org
        ca.get_subject().OU = cert_ou
        ca.gmtime_adj_notBefore(0)
        ca.gmtime_adj_notAfter(expiry_seconds)
        ca.set_issuer(ca.get_subject())  # self-signed
        ca.set_pubkey(key)
        ca.add_extensions([
            crypto.X509Extension(b"basicConstraints", True, b"CA:TRUE, pathlen:0"),
            crypto.X509Extension(b"keyUsage", True, b"keyCertSign, cRLSign"),
            crypto.X509Extension(b"subjectKeyIdentifier", False, b"hash", subject=ca)
        ])
        ca.sign(key, "sha256")

        # Create key directory if it doesn't exist
        # (fixed: previously tested the module-level key_dir).
        if not path.exists(self.key_dir):
            makedirs(self.key_dir)

        # Write CA certificate and key to file
        self._write_cert_to_file(ca, self.key_dir + '/ca.crt')
        self._write_key_to_file(key, self.key_dir + '/ca.key')

    def get_ca_key(self):
        """Return the CA private key (fixed: the result was previously discarded)."""
        return self._load_key_from_file(path.join(self.key_dir, 'ca.key'))

    def get_ca_cert(self):
        """Return the CA certificate (fixed: the result was previously discarded)."""
        return self._load_cert_from_file(path.join(self.key_dir, 'ca.crt'))

    def gen_cert(self, cert_name, cert_org=False, cert_ou=False, usage=3, days=3650, alt_names=None):
        """Generate a key pair and a CA-signed certificate named *cert_name*.

        usage: 1 = intermediate CA, 2 = server, 3 = client.
        """
        if cert_name == "":
            raise Exception("Certificate name cannot be blank")

        # Load CA certificate and key
        ca_cert = self._load_cert_from_file(self.key_dir + '/ca.crt')
        ca_key = self._load_key_from_file(self.key_dir + '/ca.key')

        # Generate new key, create and sign the CSR
        key = self._gen_key()
        req = self._create_csr(cert_name, key)
        cert = self._sign_csr(req, ca_key, ca_cert, cert_org=cert_org, cert_ou=cert_ou, usage=usage, days=days,
                              alt_names=alt_names)

        # Write new key and certificate files
        self._write_key_to_file(key, self.key_dir + '/' + cert_name + '.key')
        self._write_cert_to_file(cert, self.key_dir + '/' + cert_name + '.crt')

    def gen_pfx(self, cert_name):
        """Bundle an existing certificate, its key and the CA cert into a .pfx."""
        if cert_name == "":
            raise Exception("Certificate name cannot be blank")

        # Load CA certificate, target certificate and private key
        ca_cert = self._load_cert_from_file(self.key_dir + '/ca.crt')
        cert = self._load_cert_from_file(self.key_dir + '/' + cert_name + '.crt')
        key = self._load_key_from_file(self.key_dir + '/' + cert_name + '.key')

        # Set up PKCS12 structure
        pkcs12 = crypto.PKCS12()
        pkcs12.set_ca_certificates([ca_cert])
        pkcs12.set_certificate(cert)
        pkcs12.set_privatekey(key)

        # Write PFX file
        self._write_pfx_to_file(pkcs12, self.key_dir + '/' + cert_name + '.pfx')

    def gen_csr(self, name, out_dir):
        """Generate a key pair and unsigned CSR for *name* into *out_dir*."""
        key = self._gen_key()
        csr = self._create_csr(name, key)
        self._write_key_to_file(key, out_dir + '/' + name + '.key')
        self._write_csr_to_file(csr, out_dir + '/' + name + '.csr')

    def sign_csr(self, csr_path):
        """Sign an externally-produced CSR file with this instance's CA."""
        csr = self._load_csr_from_file(csr_path)
        # Fixed: previously loaded the CA from the module-level key_dir.
        ca_key = self._load_key_from_file(self.key_dir + '/ca.key')
        ca_cert = self._load_cert_from_file(self.key_dir + '/ca.crt')
        cert = self._sign_csr(csr, ca_key, ca_cert)
        self._write_cert_to_file(cert, self.key_dir + '/' + csr.get_subject().CN + '.crt')

    def revoke_cert(self, cert_name):
        """Revoke *cert_name*: update the CRL and mark it "R" in index.txt."""
        # Load CA certificate and key
        ca_cert = self._load_cert_from_file(self.key_dir + '/ca.crt')
        ca_key = self._load_key_from_file(self.key_dir + '/ca.key')
        # Load the certificate being revoked
        cert = self._load_cert_from_file(self.key_dir + '/' + cert_name + '.crt')
        # Load CRL File
        crl = self._load_crl_from_file(self.key_dir + '/crl.pem')

        print ('Revoking ' + cert_name + ' (Serial: ' + str(cert.get_serial_number()) + ')')

        # Revoke certificate
        revoked = crypto.Revoked()
        revoked.set_serial(hex(int(cert.get_serial_number()))[2:].encode("utf-8"))
        revoked.set_reason(b'unspecified')
        revoked.set_rev_date(datetime.utcnow().strftime('%Y%m%d%H%M%SZ').encode("utf-8"))
        crl.add_revoked(revoked)

        # Write CRL file (fixed: previously wrote to the module-level key_dir)
        self._write_crl_to_file(crl, ca_cert, ca_key, self.key_dir + '/crl.pem')

        # Update index file: rewrite the matching "V" line as an "R" line.
        # (fixed: paths previously used the module-level key_dir / 'keys/').
        index_file = open(self.key_dir + '/index.txt', 'r')
        index_file_new = open(self.key_dir + '/index.txt.new', 'w')
        for line in index_file.readlines():
            line_split = re.split('\t', line)
            if int(line_split[3], 16) == cert.get_serial_number():
                new_line = 'R\t' + line_split[1] + '\t' + revoked.get_rev_date().decode("utf-8") + '\t' + line_split[
                    3] + '\t' + line_split[4] + '\t' + line_split[5]
                index_file_new.write(new_line)
            else:
                index_file_new.write(line)
        index_file.close()
        index_file_new.close()
        copy(self.key_dir + '/index.txt.new', self.key_dir + '/index.txt')
        remove(self.key_dir + '/index.txt.new')

    def renew_crl(self):
        """Re-sign the current CRL, extending its validity window."""
        # Load CA certificate and key
        ca_cert = self._load_cert_from_file(self.key_dir + '/ca.crt')
        ca_key = self._load_key_from_file(self.key_dir + '/ca.key')
        # Load CRL File
        crl = self._load_crl_from_file(self.key_dir + '/crl.pem')
        # Write CRL file (fixed: previously wrote to the module-level key_dir)
        self._write_crl_to_file(crl, ca_cert, ca_key, self.key_dir + '/crl.pem')
def run(argv=None):
    """Command-line entry point for the certificate generator.

    Args:
        argv: full argument vector including the program name.  Defaults to
            ``sys.argv``.  Fixed: the parameter was previously accepted but
            ignored -- ``parse_args()`` always read ``sys.argv``.
    """
    if argv is None:
        argv = sys.argv

    import argparse
    parser = argparse.ArgumentParser(description='SSL Certificate Generator')
    arg_group = parser.add_mutually_exclusive_group()
    arg_group.add_argument('--ca', action='store_true', help='Generate a CA certificate')
    arg_group.add_argument('--intermediate-ca', action='store_true', help='Generate an intermediate ca certificate')
    arg_group.add_argument('--server', action='store_true', help='Generate a server certificate')
    arg_group.add_argument('--client', action='store_true', help='Generate a client certificate')
    arg_group.add_argument('--pfx', action='store_true', help='Generate a PFX File')
    arg_group.add_argument('--revoke', action='store_true', help='Revoke a certificate')
    arg_group.add_argument('--renew-crl', action='store_true', help='Renew CRL')
    parser.add_argument('--cert-name', help='Certificate name (required with --server, --client, and --pfx)')
    parser.add_argument('--cert-org', help='Certificate organization (required with --ca)')
    parser.add_argument('--cert-ou', help='Certificate organizational unit (required with --ca)')
    parser.add_argument('--alt-name', help='Subject Alternative Name', action='append')
    args = parser.parse_args(argv[1:])

    def require_cert_name():
        # Shared guard for every sub-command that needs --cert-name.
        if not args.cert_name:
            print("Error: No certificate name specified")
            sys.exit(1)

    sslgen = SSLCertificateGenerator(key_dir)
    if args.ca:
        # Collect all validation errors before exiting so the user sees
        # everything that is wrong at once.
        error = False
        if args.cert_name:
            print('Error: Certificate name was specified. CA certificate is always named "ca"')
            error = True
        if not args.cert_ou:
            print("Error: No OU specified")
            error = True
        if not args.cert_org:
            print("Error: No organization specified")
            error = True
        if error:
            sys.exit(1)
        sslgen.gen_ca(cert_org=args.cert_org, cert_ou=args.cert_ou)
    elif args.intermediate_ca:
        require_cert_name()
        sslgen.gen_cert(args.cert_name, cert_org=args.cert_org, cert_ou=args.cert_ou, usage=1)
    elif args.server:
        require_cert_name()
        sslgen.gen_cert(args.cert_name, cert_org=args.cert_org, cert_ou=args.cert_ou, usage=2, alt_names=args.alt_name)
    elif args.client:
        require_cert_name()
        sslgen.gen_cert(args.cert_name, cert_org=args.cert_org, cert_ou=args.cert_ou, usage=3, alt_names=args.alt_name)
    elif args.pfx:
        require_cert_name()
        sslgen.gen_pfx(args.cert_name)
    elif args.revoke:
        require_cert_name()
        sslgen.revoke_cert(args.cert_name)
    elif args.renew_crl:
        sslgen.renew_crl()
    else:
        print("Error: Certificate type must be specified using [--ca|--server|--client|--pfx]")
        sys.exit(1)
if __name__ == '__main__':
run() | apache-2.0 |
neilLasrado/erpnext | erpnext/education/doctype/student_report_generation_tool/student_report_generation_tool.py | 5 | 3881 | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, json
from frappe import _
from frappe.model.document import Document
from erpnext.education.api import get_grade
from frappe.utils.pdf import get_pdf
from erpnext.education.report.course_wise_assessment_report.course_wise_assessment_report import get_formatted_result
from erpnext.education.report.course_wise_assessment_report.course_wise_assessment_report import get_child_assessment_groups
class StudentReportGenerationTool(Document):
    """Controller for the "Student Report Generation Tool" DocType.

    Holds no server-side logic itself; the work is done by the
    module-level whitelisted functions below.
    """
    pass
@frappe.whitelist()
def preview_report_card(doc):
    """Render a student's report card as a downloadable PDF.

    *doc* arrives as a JSON string of the tool's form values (student,
    academic year/term, assessment group, letterhead options).  The PDF is
    returned through ``frappe.response`` as a file download.
    """
    doc = frappe._dict(json.loads(doc))
    doc.students = [doc.student]
    # Fill in missing name/batch from the student's Program Enrollment.
    # NOTE(review): the guard checks doc.student_batch but the loop below
    # assigns doc.batch -- confirm which field the template actually reads.
    if not (doc.student_name and doc.student_batch):
        program_enrollment = frappe.get_all("Program Enrollment", fields=["student_batch_name", "student_name"],
            filters={"student": doc.student, "docstatus": ('!=', 2), "academic_year": doc.academic_year})
        if program_enrollment:
            doc.batch = program_enrollment[0].student_batch_name
            doc.student_name = program_enrollment[0].student_name

    # get the assessment result of the selected student
    values = get_formatted_result(doc, get_course=True, get_all_assessment_groups=doc.include_all_assessment)
    assessment_result = values.get("assessment_result").get(doc.student)
    courses = values.get("course_dict")
    course_criteria = get_courses_criteria(courses)

    # get the assessment group as per the user selection
    if doc.include_all_assessment:
        assessment_groups = get_child_assessment_groups(doc.assessment_group)
    else:
        assessment_groups = [doc.assessment_group]

    # get the attendance of the student for that period of time
    doc.attendance = get_attendance_count(doc.students[0], doc.academic_year, doc.academic_term)

    template = "erpnext/education/doctype/student_report_generation_tool/student_report_generation_tool.html"
    base_template_path = "frappe/www/printview.html"
    # Imported here to avoid a module-level dependency on the www package.
    from frappe.www.printview import get_letter_head
    letterhead = get_letter_head(frappe._dict({"letter_head": doc.letterhead}), not doc.add_letterhead)

    # Render the report body, then wrap it in the standard printview shell.
    html = frappe.render_template(template,
        {
            "doc": doc,
            "assessment_result": assessment_result,
            "courses": courses,
            "assessment_groups": assessment_groups,
            "course_criteria": course_criteria,
            "letterhead": letterhead and letterhead.get('content', None),
            "add_letterhead": doc.add_letterhead if doc.add_letterhead else 0
        })
    final_template = frappe.render_template(base_template_path, {"body": html, "title": "Report Card"})

    # Hand the generated PDF back to the client as a download.
    frappe.response.filename = "Report Card " + doc.students[0] + ".pdf"
    frappe.response.filecontent = get_pdf(final_template)
    frappe.response.type = "download"
def get_courses_criteria(courses):
    """Map each course to the list of its assessment criteria names."""
    criteria_by_course = frappe._dict()
    for course_name in courses:
        rows = frappe.get_all("Course Assessment Criteria",
            fields=["assessment_criteria"], filters={"parent": course_name})
        criteria_by_course[course_name] = [row.assessment_criteria for row in rows]
    return criteria_by_course
def get_attendance_count(student, academic_year, academic_term=None):
    """Return ``{"Present": n, "Absent": n}`` for *student* over a period.

    The period comes from the Academic Year when given, otherwise from the
    Academic Term.  Raises (via ``frappe.throw``) when no usable date range
    can be resolved.
    """
    # Fixed: when neither academic_year nor academic_term was supplied,
    # from_date/to_date were never bound and the code raised
    # UnboundLocalError instead of reaching the intended frappe.throw below.
    from_date = to_date = None
    if academic_year:
        from_date, to_date = frappe.db.get_value("Academic Year", academic_year, ["year_start_date", "year_end_date"])
    elif academic_term:
        from_date, to_date = frappe.db.get_value("Academic Term", academic_term, ["term_start_date", "term_end_date"])
    if from_date and to_date:
        attendance = dict(frappe.db.sql('''select status, count(student) as no_of_days
            from `tabStudent Attendance` where student = %s
            and date between %s and %s group by status''',
            (student, from_date, to_date)))
        # Ensure both keys exist even when the student has no rows of that status.
        attendance.setdefault("Absent", 0)
        attendance.setdefault("Present", 0)
        return attendance
    else:
        frappe.throw(_("Provide the academic year and set the starting and ending date."))
| gpl-3.0 |
rhdekker/collatex | collatex-pythonport/docs/conf.py | 4 | 8473 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this docs directory.
cwd = os.getcwd()
project_root = os.path.dirname(cwd)

# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)

# NOTE(review): this assumes sphinx-build runs from the docs directory so
# that project_root resolves to the repository root -- confirm in CI.
import collatex
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CollateX-Python'
copyright = u'2014, Ronald Haentjens Dekker'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = collatex.__version__
# The full version, including alpha/beta/rc tags.
release = collatex.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'collatexdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'collatex.tex',
u'CollateX-Python Documentation',
u'Ronald Haentjens Dekker', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'collatex',
u'CollateX-Python Documentation',
[u'Ronald Haentjens Dekker'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'collatex',
u'CollateX-Python Documentation',
u'Ronald Haentjens Dekker',
'Collatex',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False | gpl-3.0 |
tongwang01/tensorflow | tensorflow/contrib/tensor_forest/hybrid/python/layers/decisions_to_data.py | 45 | 9715 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Treats a decision tree as a representation transformation layer.
A decision tree transformer takes features as input and returns the probability
of reaching each leaf as output. The routing throughout the tree is learnable
via backpropagation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_layer
from tensorflow.contrib.tensor_forest.hybrid.python.ops import training_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
class DecisionsToDataLayer(hybrid_layer.HybridLayer):
  """A layer that treats soft decisions as data."""

  def _define_vars(self, params, **kwargs):
    # Create the learnable tree parameters on this layer's assigned device:
    # one weight vector ([num_features]) and one threshold per tree node.
    with ops.device(self.device_assigner.get_device(self.layer_num)):
      self.tree_parameters = variable_scope.get_variable(
          name='tree_parameters_%d' % self.layer_num,
          shape=[params.num_nodes, params.num_features],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))

      self.tree_thresholds = variable_scope.get_variable(
          name='tree_thresholds_%d' % self.layer_num,
          shape=[params.num_nodes],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))

  def __init__(self, params, layer_num, device_assigner,
               *args, **kwargs):
    super(DecisionsToDataLayer, self).__init__(
        params, layer_num, device_assigner, *args, **kwargs)
    # Load the custom routing ops shared by all hybrid-tree layers.
    self.training_ops = training_ops.Load()

  def inference_graph(self, data):
    """Build the graph producing per-leaf routing probabilities for `data`."""
    with ops.device(self.device_assigner.get_device(self.layer_num)):
      routing_probabilities = self.training_ops.routing_function(
          data,
          self.tree_parameters,
          self.tree_thresholds,
          max_nodes=self.params.num_nodes)

      # Keep only the columns corresponding to leaf nodes; the earlier
      # columns belong to internal nodes.
      output = array_ops.slice(
          routing_probabilities,
          [0, self.params.num_nodes - self.params.num_leaves - 1],
          [-1, self.params.num_leaves])

      return output
class KFeatureDecisionsToDataLayer(hybrid_layer.HybridLayer):
  """A layer that treats soft decisions made on single features as data."""

  def _define_vars(self, params, **kwargs):
    # Each node only uses num_features_per_node features (the selection is
    # handled inside the routing op), so the parameter matrix is
    # [num_nodes, num_features_per_node] rather than [num_nodes, num_features].
    with ops.device(self.device_assigner.get_device(self.layer_num)):
      self.tree_parameters = variable_scope.get_variable(
          name='tree_parameters_%d' % self.layer_num,
          shape=[params.num_nodes, params.num_features_per_node],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))

      self.tree_thresholds = variable_scope.get_variable(
          name='tree_thresholds_%d' % self.layer_num,
          shape=[params.num_nodes],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))

  def __init__(self, params, layer_num, device_assigner,
               *args, **kwargs):
    super(KFeatureDecisionsToDataLayer, self).__init__(
        params, layer_num, device_assigner, *args, **kwargs)
    # Load the custom routing ops shared by all hybrid-tree layers.
    self.training_ops = training_ops.Load()

  # pylint: disable=unused-argument
  def inference_graph(self, data):
    """Build the graph producing per-leaf routing probabilities for `data`."""
    with ops.device(self.device_assigner.get_device(self.layer_num)):
      routing_probabilities = self.training_ops.k_feature_routing_function(
          data,
          self.tree_parameters,
          self.tree_thresholds,
          max_nodes=self.params.num_nodes,
          num_features_per_node=self.params.num_features_per_node,
          layer_num=0,
          random_seed=self.params.base_random_seed)

      # Keep only the columns corresponding to leaf nodes.
      output = array_ops.slice(
          routing_probabilities,
          [0, self.params.num_nodes - self.params.num_leaves - 1],
          [-1, self.params.num_leaves])

      return output
class HardDecisionsToDataLayer(DecisionsToDataLayer):
  """A layer that learns a soft decision tree but treats it as hard at test."""

  def _define_vars(self, params, **kwargs):
    # Fixed for consistency with every sibling layer in this module: the
    # initializer was referenced as variable_scope.truncated_normal_initializer,
    # but truncated_normal_initializer lives in init_ops.
    with ops.device(self.device_assigner.get_device(self.layer_num)):
      self.tree_parameters = variable_scope.get_variable(
          name='hard_tree_parameters_%d' % self.layer_num,
          shape=[params.num_nodes, params.num_features],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))

      self.tree_thresholds = variable_scope.get_variable(
          name='hard_tree_thresholds_%d' % self.layer_num,
          shape=[params.num_nodes],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))

  def soft_inference_graph(self, data):
    # During training, fall back to the parent's soft routing graph.
    return super(HardDecisionsToDataLayer, self).inference_graph(data)

  def inference_graph(self, data):
    """Build the hard (single-path) routing graph used at test time."""
    with ops.device(self.device_assigner.get_device(self.layer_num)):
      path_probability, path = self.training_ops.hard_routing_function(
          data,
          self.tree_parameters,
          self.tree_thresholds,
          max_nodes=self.params.num_nodes,
          tree_depth=self.params.hybrid_tree_depth)

      # Keep only the columns corresponding to leaf nodes.
      output = array_ops.slice(
          self.training_ops.unpack_path(path, path_probability),
          [0, self.params.num_nodes - self.params.num_leaves - 1],
          [-1, self.params.num_leaves])

      return output
class StochasticHardDecisionsToDataLayer(HardDecisionsToDataLayer):
  """A layer that learns a soft decision tree by sampling paths."""

  def _define_vars(self, params, **kwargs):
    # One learnable weight vector and threshold per tree node, placed on
    # this layer's assigned device.
    with ops.device(self.device_assigner.get_device(self.layer_num)):
      self.tree_parameters = variable_scope.get_variable(
          name='stochastic_hard_tree_parameters_%d' % self.layer_num,
          shape=[params.num_nodes, params.num_features],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))

      self.tree_thresholds = variable_scope.get_variable(
          name='stochastic_hard_tree_thresholds_%d' % self.layer_num,
          shape=[params.num_nodes],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))

  def soft_inference_graph(self, data):
    # Training-time graph: sample a root-to-leaf path stochastically and
    # unpack it into per-leaf values.
    with ops.device(self.device_assigner.get_device(self.layer_num)):
      path_probability, path = (
          self.training_ops.stochastic_hard_routing_function(
              data,
              self.tree_parameters,
              self.tree_thresholds,
              tree_depth=self.params.hybrid_tree_depth,
              random_seed=self.params.base_random_seed))

      # Keep only the columns corresponding to leaf nodes.
      output = array_ops.slice(
          self.training_ops.unpack_path(path, path_probability),
          [0, self.params.num_nodes - self.params.num_leaves - 1],
          [-1, self.params.num_leaves])

      return output

  def inference_graph(self, data):
    # Test-time graph: follow the deterministic hard routing instead of
    # sampling.
    with ops.device(self.device_assigner.get_device(self.layer_num)):
      path_probability, path = self.training_ops.hard_routing_function(
          data,
          self.tree_parameters,
          self.tree_thresholds,
          max_nodes=self.params.num_nodes,
          tree_depth=self.params.hybrid_tree_depth)

      # Keep only the columns corresponding to leaf nodes.
      output = array_ops.slice(
          self.training_ops.unpack_path(path, path_probability),
          [0, self.params.num_nodes - self.params.num_leaves - 1],
          [-1, self.params.num_leaves])

      return output
class StochasticSoftDecisionsToDataLayer(StochasticHardDecisionsToDataLayer):
  """A layer that learns a soft decision tree by sampling paths."""

  def _define_vars(self, params, **kwargs):
    # One learnable weight vector and threshold per tree node, placed on
    # this layer's assigned device.
    with ops.device(self.device_assigner.get_device(self.layer_num)):
      self.tree_parameters = variable_scope.get_variable(
          name='stochastic_soft_tree_parameters_%d' % self.layer_num,
          shape=[params.num_nodes, params.num_features],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))

      self.tree_thresholds = variable_scope.get_variable(
          name='stochastic_soft_tree_thresholds_%d' % self.layer_num,
          shape=[params.num_nodes],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))

  def inference_graph(self, data):
    # Unlike the stochastic-hard parent, inference here uses the fully soft
    # routing function rather than a single sampled/hard path.
    with ops.device(self.device_assigner.get_device(self.layer_num)):
      routes = self.training_ops.routing_function(
          data,
          self.tree_parameters,
          self.tree_thresholds,
          max_nodes=self.params.num_nodes)

      # Keep only the columns corresponding to leaf nodes.
      leaf_routes = array_ops.slice(
          routes, [0, self.params.num_nodes - self.params.num_leaves - 1],
          [-1, self.params.num_leaves])

      return leaf_routes
| apache-2.0 |
iqt4/openwrt | tools/b43-tools/files/b43-fwsquash.py | 494 | 4767 | #!/usr/bin/env python
#
# b43 firmware file squasher
# Removes unnecessary firmware files
#
# Copyright (c) 2009 Michael Buesch <mb@bu3sch.de>
#
# Licensed under the GNU/GPL version 2 or (at your option) any later version.
#
import sys
import os
def usage():
	"""Print the command-line help text for this tool."""
	help_lines = (
		"Usage: %s PHYTYPES COREREVS /path/to/extracted/firmware" % sys.argv[0],
		"",
		"PHYTYPES is a comma separated list of:",
		"A => A-PHY",
		"AG => Dual A-PHY G-PHY",
		"G => G-PHY",
		"LP => LP-PHY",
		"N => N-PHY",
		"HT => HT-PHY",
		"LCN => LCN-PHY",
		"LCN40 => LCN40-PHY",
		"AC => AC-PHY",
		"",
		"COREREVS is a comma separated list of core revision numbers.",
	)
	print("\n".join(help_lines))
if len(sys.argv) != 4:
	usage()
	sys.exit(1)

phytypes = sys.argv[1]
corerevs = sys.argv[2]
fwpath = sys.argv[3]

phytypes = phytypes.split(',')
try:
	# Materialize as a list: on Python 3 map() returns a one-shot iterator,
	# which would be exhausted after the first revs_match() call below.
	corerevs = [int(r) for r in corerevs.split(',')]
except ValueError:
	print("ERROR: \"%s\" is not a valid COREREVS string\n" % corerevs)
	usage()
	sys.exit(1)

fwfiles = os.listdir(fwpath)
# Materialize as a list: the result is iterated twice and tested for
# emptiness (a lazy filter object is always truthy on Python 3). Also
# avoids the old lambda parameter that shadowed the builtin 'str'.
fwfiles = [name for name in fwfiles if name.endswith(".fw")]
if not fwfiles:
	print("ERROR: No firmware files found in %s" % fwpath)
	sys.exit(1)

required_fwfiles = []
def revs_match(revs_a, revs_b):
	"""Return True if the two revision collections share any element."""
	return any(rev in revs_b for rev in revs_a)
def phytypes_match(types_a, types_b):
	"""Return True if any PHY name in types_a appears in types_b.

	Names from types_a are normalized (whitespace stripped, uppercased)
	before comparison, exactly as the old loop did. Rewritten with any()
	to avoid shadowing the builtin 'type'.
	"""
	return any(name.strip().upper() in types_b for name in types_a)
# Maps microcode/PCM firmware file name to a pair of tuples:
# ( (supported core revisions...), (supported PHY types...) ).
revmapping = {
	"ucode2.fw"		: ( (2,3,), ("G",), ),
	"ucode4.fw"		: ( (4,), ("G",), ),
	"ucode5.fw"		: ( (5,6,7,8,9,10,), ("G","A","AG",), ),
	"ucode11.fw"		: ( (11,12,), ("N",), ),
	"ucode13.fw"		: ( (13,), ("LP","G",), ),
	"ucode14.fw"		: ( (14,), ("LP",), ),
	"ucode15.fw"		: ( (15,), ("LP",), ),
	"ucode16_mimo.fw"	: ( (16,17,18,19,23,), ("N",), ),
#	"ucode16_lp.fw"		: ( (16,17,18,19,), ("LP",), ),
	"ucode24_lcn.fw"	: ( (24,), ("LCN",), ),
	"ucode25_mimo.fw"	: ( (25,28,), ("N",), ),
	"ucode25_lcn.fw"	: ( (25,28,), ("LCN",), ),
	"ucode26_mimo.fw"	: ( (26,), ("HT",), ),
	"ucode29_mimo.fw"	: ( (29,), ("HT",), ),
	"ucode30_mimo.fw"	: ( (30,), ("N",), ),
	"ucode33_lcn40.fw"	: ( (33,), ("LCN40",), ),
	"ucode40.fw"		: ( (40,), ("AC",), ),
	"ucode42.fw"		: ( (42,), ("AC",), ),
	"pcm4.fw"		: ( (1,2,3,4,), ("G",), ),
	"pcm5.fw"		: ( (5,6,7,8,9,10,), ("G","A","AG",), ),
}

# Same structure as revmapping, for the initial-values firmware files
# (the *bsinitvals* entries are the band-switch variants).
initvalmapping = {
	"a0g1initvals5.fw"	: ( (5,6,7,8,9,10,), ("AG",), ),
	"a0g0initvals5.fw"	: ( (5,6,7,8,9,10,), ("A", "AG",), ),
	"b0g0initvals2.fw"	: ( (2,4,), ("G",), ),
	"b0g0initvals5.fw"	: ( (5,6,7,8,9,10,), ("G",), ),
	"b0g0initvals13.fw"	: ( (13,), ("G",), ),
	"n0initvals11.fw"	: ( (11,12,), ("N",), ),
	"n0initvals16.fw"	: ( (16,17,18,23,), ("N",), ),
	"n0initvals24.fw"	: ( (24,), ("N",), ),
	"n0initvals25.fw"	: ( (25,28,), ("N",), ),
	"n16initvals30.fw"	: ( (30,), ("N",), ),
	"lp0initvals13.fw"	: ( (13,), ("LP",), ),
	"lp0initvals14.fw"	: ( (14,), ("LP",), ),
	"lp0initvals15.fw"	: ( (15,), ("LP",), ),
#	"lp0initvals16.fw"	: ( (16,17,18,), ("LP",), ),
	"lcn0initvals24.fw"	: ( (24,), ("LCN",), ),
	"ht0initvals26.fw"	: ( (26,), ("HT",), ),
	"ht0initvals29.fw"	: ( (29,), ("HT",), ),
	"lcn400initvals33.fw"	: ( (33,), ("LCN40",), ),
	"ac0initvals40.fw"	: ( (40,), ("AC",), ),
	"ac1initvals42.fw"	: ( (42,), ("AC",), ),
	"a0g1bsinitvals5.fw"	: ( (5,6,7,8,9,10,), ("AG",), ),
	"a0g0bsinitvals5.fw"	: ( (5,6,7,8,9,10,), ("A", "AG"), ),
	"b0g0bsinitvals5.fw"	: ( (5,6,7,8,9,10,), ("G",), ),
	"n0bsinitvals11.fw"	: ( (11,12,), ("N",), ),
	"n0bsinitvals16.fw"	: ( (16,17,18,23,), ("N",), ),
	"n0bsinitvals24.fw"	: ( (24,), ("N",), ),
	"n0bsinitvals25.fw"	: ( (25,28,), ("N",), ),
	"n16bsinitvals30.fw"	: ( (30,), ("N",), ),
	"lp0bsinitvals13.fw"	: ( (13,), ("LP",), ),
	"lp0bsinitvals14.fw"	: ( (14,), ("LP",), ),
	"lp0bsinitvals15.fw"	: ( (15,), ("LP",), ),
#	"lp0bsinitvals16.fw"	: ( (16,17,18,), ("LP",), ),
	"lcn0bsinitvals24.fw"	: ( (24,), ("LCN",), ),
	"ht0bsinitvals26.fw"	: ( (26,), ("HT",), ),
	"ht0bsinitvals29.fw"	: ( (29,), ("HT",), ),
	"lcn400bsinitvals33.fw"	: ( (33,), ("LCN40",), ),
	"ac0bsinitvals40.fw"	: ( (40,), ("AC",), ),
	"ac1bsinitvals42.fw"	: ( (42,), ("AC",), ),
}
# Decide which firmware files are needed for the requested PHY types and
# core revisions.
for f in fwfiles:
	if f in revmapping:
		if revs_match(corerevs, revmapping[f][0]) and\
		   phytypes_match(phytypes, revmapping[f][1]):
			required_fwfiles += [f]
			continue
	if f in initvalmapping:
		if revs_match(corerevs, initvalmapping[f][0]) and\
		   phytypes_match(phytypes, initvalmapping[f][1]):
			required_fwfiles += [f]
			continue
	# NOTE(review): this warning also fires for files that ARE in a mapping
	# but do not match the requested revs/PHY types -- confirm whether it is
	# meant only for genuinely unknown files.
	print("WARNING: Firmware file %s not found in the mapping lists" % f)

# Delete everything that is not required for the requested configuration.
for f in fwfiles:
	if f not in required_fwfiles:
		print("Deleting %s" % f)
		os.unlink(fwpath + '/' + f)
| gpl-2.0 |
maximatorrus/automated_testing_python | test/test_user_info.py | 1 | 1810 | import re
from random import randrange
def test_all_info(app):
    """Check that a random contact's aggregated phone/email fields and name
    fields shown on the home page match the values on its edit page."""
    users = app.user.get_user_list()
    index = randrange(len(users))
    # Fetch the list again so the compared entry reflects current page state.
    user_from_home_page = app.user.get_user_list()[index]
    user_from_edit_page = app.user.get_user_info_from_edit_page(index)
    assert user_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(user_from_edit_page)
    assert user_from_home_page.all_emails_from_home_page == merge_emails_like_on_home_page(user_from_edit_page)
    assert user_from_home_page.firstname == user_from_edit_page.firstname
    assert user_from_home_page.lastname == user_from_edit_page.lastname
    assert user_from_home_page.address == user_from_edit_page.address
def test_phones_on_user_view_page(app):
    """Check that each phone field on the first contact's view page equals
    the corresponding field on its edit page."""
    user_from_view_page = app.user.get_user_from_view_page(0)
    user_from_edit_page = app.user.get_user_info_from_edit_page(0)
    assert user_from_view_page.telephone == user_from_edit_page.telephone
    assert user_from_view_page.mobile == user_from_edit_page.mobile
    assert user_from_view_page.work == user_from_edit_page.work
    assert user_from_view_page.secondaryphone == user_from_edit_page.secondaryphone
def clear(value):
    """Strip phone-number formatting characters -- parentheses, spaces and
    dashes -- from the given string.

    The parameter was renamed from ``str`` so it no longer shadows the
    builtin; all call sites in this module pass it positionally.
    """
    return re.sub("[() -]", "", value)
def merge_phones_like_on_home_page(user):
    """Join a contact's non-empty phone numbers with newlines, formatted the
    way the home page renders them (formatting characters removed).

    Rewritten from a nested lambda/map/filter chain into comprehensions;
    the filtering order (drop None, clean, drop empty) is unchanged.
    """
    phones = [user.telephone, user.mobile, user.work, user.secondaryphone]
    cleaned = [clear(phone) for phone in phones if phone is not None]
    return "\n".join(number for number in cleaned if number != "")
def merge_emails_like_on_home_page(user):
    """Join a contact's non-empty e-mail addresses with newlines, the way the
    home page renders them.

    Rewritten from a nested lambda/filter chain into a single comprehension;
    None and empty-string entries are dropped exactly as before.
    """
    emails = [user.email_, user.email2, user.email3]
    return "\n".join(email for email in emails if email is not None and email != "")
| apache-2.0 |
proxysh/Safejumper-for-Desktop | buildlinux/env32/lib/python2.7/site-packages/twisted/protocols/haproxy/test/test_v1parser.py | 13 | 4694 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.protocols.haproxy.V1Parser}.
"""
from twisted.trial import unittest
from twisted.internet import address
from .._exceptions import (
InvalidProxyHeader, InvalidNetworkProtocol, MissingAddressData
)
from .. import _v1parser
class V1ParserTests(unittest.TestCase):
    """
    Test L{twisted.protocols.haproxy.V1Parser} behaviour.
    """

    def test_missingPROXYHeaderValue(self):
        """
        Test that an exception is raised when the PROXY header is missing.
        """
        self.assertRaises(
            InvalidProxyHeader,
            _v1parser.V1Parser.parse,
            b'NOTPROXY ',
        )

    def test_invalidNetworkProtocol(self):
        """
        Test that an exception is raised when the proto is not TCP or UNKNOWN.
        """
        self.assertRaises(
            InvalidNetworkProtocol,
            _v1parser.V1Parser.parse,
            b'PROXY WUTPROTO ',
        )

    def test_missingSourceData(self):
        """
        Test that an exception is raised when the proto has no source data.
        """
        self.assertRaises(
            MissingAddressData,
            _v1parser.V1Parser.parse,
            b'PROXY TCP4 ',
        )

    def test_missingDestData(self):
        """
        Test that an exception is raised when the proto has no destination.
        """
        self.assertRaises(
            MissingAddressData,
            _v1parser.V1Parser.parse,
            b'PROXY TCP4 127.0.0.1 8080 8888',
        )

    def test_fullParsingSuccess(self):
        """
        Test that parsing is successful for a PROXY header.
        """
        info = _v1parser.V1Parser.parse(
            b'PROXY TCP4 127.0.0.1 127.0.0.1 8080 8888',
        )
        self.assertIsInstance(info.source, address.IPv4Address)
        self.assertEqual(info.source.host, b'127.0.0.1')
        self.assertEqual(info.source.port, 8080)
        self.assertEqual(info.destination.host, b'127.0.0.1')
        self.assertEqual(info.destination.port, 8888)

    def test_fullParsingSuccess_IPv6(self):
        """
        Test that parsing is successful for an IPv6 PROXY header.
        """
        info = _v1parser.V1Parser.parse(
            b'PROXY TCP6 ::1 ::1 8080 8888',
        )
        self.assertIsInstance(info.source, address.IPv6Address)
        self.assertEqual(info.source.host, b'::1')
        self.assertEqual(info.source.port, 8080)
        self.assertEqual(info.destination.host, b'::1')
        self.assertEqual(info.destination.port, 8888)

    def test_fullParsingSuccess_UNKNOWN(self):
        """
        Test that parsing is successful for a UNKNOWN PROXY header.
        """
        info = _v1parser.V1Parser.parse(
            b'PROXY UNKNOWN anything could go here',
        )
        self.assertIsNone(info.source)
        self.assertIsNone(info.destination)

    def test_feedParsing(self):
        """
        Test that parsing happens when fed a complete line.
        """
        parser = _v1parser.V1Parser()
        # Partial lines buffer internally and yield nothing until the
        # terminating CRLF arrives.
        info, remaining = parser.feed(b'PROXY TCP4 127.0.0.1 127.0.0.1 ')
        self.assertFalse(info)
        self.assertFalse(remaining)
        info, remaining = parser.feed(b'8080 8888')
        self.assertFalse(info)
        self.assertFalse(remaining)
        info, remaining = parser.feed(b'\r\n')
        self.assertFalse(remaining)
        self.assertIsInstance(info.source, address.IPv4Address)
        self.assertEqual(info.source.host, b'127.0.0.1')
        self.assertEqual(info.source.port, 8080)
        self.assertEqual(info.destination.host, b'127.0.0.1')
        self.assertEqual(info.destination.port, 8888)

    def test_feedParsingTooLong(self):
        """
        Test that parsing fails if no newline is found in 108 bytes.
        """
        parser = _v1parser.V1Parser()
        info, remaining = parser.feed(b'PROXY TCP4 127.0.0.1 127.0.0.1 ')
        self.assertFalse(info)
        self.assertFalse(remaining)
        info, remaining = parser.feed(b'8080 8888')
        self.assertFalse(info)
        self.assertFalse(remaining)
        # 108 bytes is the v1 protocol's maximum header length.
        self.assertRaises(
            InvalidProxyHeader,
            parser.feed,
            b' ' * 100,
        )

    def test_feedParsingOverflow(self):
        """
        Test that parsing leaves overflow bytes in the buffer.
        """
        parser = _v1parser.V1Parser()
        info, remaining = parser.feed(
            b'PROXY TCP4 127.0.0.1 127.0.0.1 8080 8888\r\nHTTP/1.1 GET /\r\n',
        )
        self.assertTrue(info)
        # Bytes past the CRLF belong to the proxied protocol, not the header.
        self.assertEqual(remaining, b'HTTP/1.1 GET /\r\n')
        self.assertFalse(parser.buffer)
| gpl-2.0 |
ltilve/chromium | tools/auto_bisect/bisect_utils.py | 8 | 16817 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions used by the bisect tool.
This includes functions related to checking out the depot and outputting
annotations for the Buildbot waterfall.
"""
import errno
import imp
import os
import stat
import subprocess
import sys
# Extra deps merged into the gclient solution for bisects; a value of None
# removes that dependency from the checkout.
DEFAULT_GCLIENT_CUSTOM_DEPS = {
    'src/data/page_cycler': 'https://chrome-internal.googlesource.com/'
                            'chrome/data/page_cycler/.git',
    'src/data/dom_perf': 'https://chrome-internal.googlesource.com/'
                         'chrome/data/dom_perf/.git',
    'src/data/mach_ports': 'https://chrome-internal.googlesource.com/'
                           'chrome/data/mach_ports/.git',
    'src/tools/perf/data': 'https://chrome-internal.googlesource.com/'
                           'chrome/tools/perf/data/.git',
    'src/third_party/adobe/flash/binaries/ppapi/linux':
        'https://chrome-internal.googlesource.com/'
        'chrome/deps/adobe/flash/binaries/ppapi/linux/.git',
    'src/third_party/adobe/flash/binaries/ppapi/linux_x64':
        'https://chrome-internal.googlesource.com/'
        'chrome/deps/adobe/flash/binaries/ppapi/linux_x64/.git',
    'src/third_party/adobe/flash/binaries/ppapi/mac':
        'https://chrome-internal.googlesource.com/'
        'chrome/deps/adobe/flash/binaries/ppapi/mac/.git',
    'src/third_party/adobe/flash/binaries/ppapi/mac_64':
        'https://chrome-internal.googlesource.com/'
        'chrome/deps/adobe/flash/binaries/ppapi/mac_64/.git',
    'src/third_party/adobe/flash/binaries/ppapi/win':
        'https://chrome-internal.googlesource.com/'
        'chrome/deps/adobe/flash/binaries/ppapi/win/.git',
    'src/third_party/adobe/flash/binaries/ppapi/win_x64':
        'https://chrome-internal.googlesource.com/'
        'chrome/deps/adobe/flash/binaries/ppapi/win_x64/.git',
    'src/chrome/tools/test/reference_build/chrome_win': None,
    'src/chrome/tools/test/reference_build/chrome_mac': None,
    'src/chrome/tools/test/reference_build/chrome_linux': None,
    'src/third_party/WebKit/LayoutTests': None,
    'src/tools/valgrind': None,
}
# Base gclient "solutions" entry; serialized into the --spec argument by
# RunGClientAndCreateConfig().
GCLIENT_SPEC_DATA = [
    {
        'name': 'src',
        'url': 'https://chromium.googlesource.com/chromium/src.git',
        'deps_file': '.DEPS.git',
        'managed': True,
        'custom_deps': {},
        'safesync_url': '',
    },
]
# Appended to the spec when bisecting Android targets.
GCLIENT_SPEC_ANDROID = "\ntarget_os = ['android']"
# Extra dep used when recursing into the V8 bleeding-edge branch.
GCLIENT_CUSTOM_DEPS_V8 = {
    'src/v8_bleeding_edge': 'https://chromium.googlesource.com/v8/v8.git'
}
FILE_DEPS_GIT = '.DEPS.git'
FILE_DEPS = 'DEPS'

# Bisect working directory.
BISECT_DIR = 'bisect'

# The percentage at which confidence is considered high.
HIGH_CONFIDENCE = 95
# Below is the map of "depot" names to information about each depot. Each depot
# is a repository, and in the process of bisecting, revision ranges in these
# repositories may also be bisected.
#
# Each depot information dictionary may contain:
#   src: Path to the working directory.
#   recurse: True if this repository will get bisected.
#   svn: URL of SVN repository. Needed for git workflow to resolve hashes to
#       SVN revisions.
#   from: Parent depot that must be bisected before this is bisected.
#   deps_var: Key name in vars variable in DEPS file that has revision
#       information.
DEPOT_DEPS_NAME = {
    'chromium': {
        'src': 'src',
        'recurse': True,
        'from': ['android-chrome'],
        'viewvc': 'https://chromium.googlesource.com/chromium/src/+/',
        'deps_var': 'chromium_rev'
    },
    'webkit': {
        'src': 'src/third_party/WebKit',
        'recurse': True,
        'from': ['chromium'],
        'viewvc': 'https://chromium.googlesource.com/chromium/blink/+/',
        'deps_var': 'webkit_revision'
    },
    'angle': {
        'src': 'src/third_party/angle',
        'src_old': 'src/third_party/angle_dx11',
        'recurse': True,
        'from': ['chromium'],
        'platform': 'nt',
        'viewvc': 'https://chromium.googlesource.com/angle/angle/+/',
        'deps_var': 'angle_revision'
    },
    'v8': {
        'src': 'src/v8',
        'recurse': True,
        'from': ['chromium'],
        'custom_deps': GCLIENT_CUSTOM_DEPS_V8,
        'viewvc': 'https://chromium.googlesource.com/v8/v8.git/+/',
        'deps_var': 'v8_revision'
    },
    'v8_bleeding_edge': {
        'src': 'src/v8_bleeding_edge',
        'recurse': True,
        'svn': 'https://v8.googlecode.com/svn/branches/bleeding_edge',
        'from': ['v8'],
        'viewvc': 'https://chromium.googlesource.com/v8/v8.git/+/',
        'deps_var': 'v8_revision'
    },
    'skia/src': {
        'src': 'src/third_party/skia/src',
        'recurse': True,
        'from': ['chromium'],
        'viewvc': 'https://chromium.googlesource.com/skia/+/',
        'deps_var': 'skia_revision'
    }
}

DEPOT_NAMES = DEPOT_DEPS_NAME.keys()

# The possible values of the --bisect_mode flag, which determines what to
# use when classifying a revision as "good" or "bad".
BISECT_MODE_MEAN = 'mean'
BISECT_MODE_STD_DEV = 'std_dev'
BISECT_MODE_RETURN_CODE = 'return_code'
def AddAdditionalDepotInfo(depot_info):
  """Adds additional depot info to the global depot variables."""
  global DEPOT_DEPS_NAME
  global DEPOT_NAMES
  merged = DEPOT_DEPS_NAME.copy()
  merged.update(depot_info)
  DEPOT_DEPS_NAME = merged
  DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
def OutputAnnotationStepStart(name):
  """Outputs annotation to signal the start of a step to a try bot.

  Args:
    name: The name of the step.
  """
  print
  print '@@@SEED_STEP %s@@@' % name
  print '@@@STEP_CURSOR %s@@@' % name
  print '@@@STEP_STARTED@@@'
  print
  # Flush so the buildbot log shows the annotation immediately.
  sys.stdout.flush()
def OutputAnnotationStepClosed():
  """Outputs annotation to signal the closing of a step to a try bot."""
  print
  print '@@@STEP_CLOSED@@@'
  print
  # Flush so the buildbot log shows the annotation immediately.
  sys.stdout.flush()
def OutputAnnotationStepText(text):
  """Outputs appropriate annotation to print text.

  Args:
    text: The text to print.
  """
  print
  print '@@@STEP_TEXT@%s@@@' % text
  print
  sys.stdout.flush()
def OutputAnnotationStepWarning():
  """Outputs appropriate annotation to signal a warning."""
  # Unlike the step-start/text helpers, this does not flush stdout.
  print
  print '@@@STEP_WARNINGS@@@'
  print
def OutputAnnotationStepFailure():
  """Outputs appropriate annotation to signal a step failure."""
  print
  print '@@@STEP_FAILURE@@@'
  print
def OutputAnnotationStepLink(label, url):
  """Outputs appropriate annotation to print a link.

  Args:
    label: The link text to display.
    url: The URL to print.
  """
  print
  print '@@@STEP_LINK@%s@%s@@@' % (label, url)
  print
  sys.stdout.flush()
def LoadExtraSrc(path_to_file):
  """Attempts to load an extra source file, and overrides global values.

  If the extra source file is loaded successfully, then it will use the new
  module to override some global values, such as gclient spec data.

  Args:
    path_to_file: File path.

  Returns:
    The loaded module object, or None if none was imported.
  """
  try:
    global GCLIENT_SPEC_DATA
    global GCLIENT_SPEC_ANDROID
    # Executes the file at path_to_file as a module named 'data'.
    extra_src = imp.load_source('data', path_to_file)
    # NOTE(review): only ImportError is caught below; an AttributeError from
    # a module missing these hooks would propagate -- confirm that's intended.
    GCLIENT_SPEC_DATA = extra_src.GetGClientSpec()
    GCLIENT_SPEC_ANDROID = extra_src.GetGClientSpecExtraParams()
    return extra_src
  except ImportError:
    return None
def IsTelemetryCommand(command):
  """Attempts to discern whether or not a given command is running telemetry."""
  telemetry_markers = ('tools/perf/run_', 'tools\\perf\\run_')
  return any(marker in command for marker in telemetry_markers)
def _CreateAndChangeToSourceDirectory(working_directory):
  """Creates a directory 'bisect' as a subdirectory of |working_directory|.

  If successful, the current working directory will be changed to the new
  'bisect' directory.

  Args:
    working_directory: The directory to create the new 'bisect' directory in.

  Returns:
    True if the directory was successfully created (or already existed).
  """
  cwd = os.getcwd()
  os.chdir(working_directory)
  try:
    os.mkdir(BISECT_DIR)
  except OSError, e:
    if e.errno != errno.EEXIST:  # EEXIST indicates that it already exists.
      # Restore the caller's working directory before reporting failure.
      os.chdir(cwd)
      return False
  os.chdir(BISECT_DIR)
  return True
def _SubprocessCall(cmd, cwd=None):
"""Runs a command in a subprocess.
Args:
cmd: The command to run.
cwd: Working directory to run from.
Returns:
The return code of the call.
"""
if os.name == 'nt':
# "HOME" isn't normally defined on windows, but is needed
# for git to find the user's .netrc file.
if not os.getenv('HOME'):
os.environ['HOME'] = os.environ['USERPROFILE']
shell = os.name == 'nt'
return subprocess.call(cmd, shell=shell, cwd=cwd)
def RunGClient(params, cwd=None):
  """Runs gclient with the specified parameters.

  Args:
    params: A list of parameters to pass to gclient.
    cwd: Working directory to run from.

  Returns:
    The return code of the call.
  """
  return _SubprocessCall(['gclient'] + params, cwd=cwd)
def RunGClientAndCreateConfig(opts, custom_deps=None, cwd=None):
  """Runs gclient and creates a config containing both src and src-internal.

  Args:
    opts: The options parsed from the command line through parse_args().
    custom_deps: A dictionary of additional dependencies to add to .gclient.
    cwd: Working directory to run from.

  Returns:
    The return code of the call.
  """
  spec = GCLIENT_SPEC_DATA
  if custom_deps:
    # NOTE(review): this writes into the module-level GCLIENT_SPEC_DATA, so
    # custom deps accumulate across calls -- confirm that is intended.
    for k, v in custom_deps.iteritems():
      spec[0]['custom_deps'][k] = v

  # Cannot have newlines in string on windows
  spec = 'solutions =' + str(spec)
  spec = ''.join([l for l in spec.splitlines()])

  if 'android' in opts.target_platform:
    spec += GCLIENT_SPEC_ANDROID

  return_code = RunGClient(
      ['config', '--spec=%s' % spec], cwd=cwd)
  return return_code
def OnAccessError(func, path, _):
  """Error handler for shutil.rmtree.

  Source: http://goo.gl/DEYNCT

  If the error is due to an access error (read only file), it attempts to add
  write permissions, then retries.

  If the error is for another reason it re-raises the error.

  Args:
    func: The function that raised the error.
    path: The path name passed to func.
    _: Exception information from sys.exc_info(). Not used.
  """
  if os.access(path, os.W_OK):
    # Not a permission problem; propagate the original exception.
    raise
  # Read-only entry: grant owner write permission and retry the operation.
  os.chmod(path, stat.S_IWUSR)
  func(path)
def _CleanupPreviousGitRuns(cwd=os.getcwd()):
"""Cleans up any leftover index.lock files after running git."""
# If a previous run of git crashed, or bot was reset, etc., then we might
# end up with leftover index.lock files.
for path, _, files in os.walk(cwd):
for cur_file in files:
if cur_file.endswith('index.lock'):
path_to_file = os.path.join(path, cur_file)
os.remove(path_to_file)
def RunGClientAndSync(revisions=None, cwd=None):
  """Runs gclient and does a normal sync.

  Args:
    revisions: List of revisions that need to be synced.
        E.g., "src@2ae43f...", "src/third_party/webkit@asr1234" etc.
    cwd: Working directory to run from.

  Returns:
    The return code of the call.
  """
  params = ['sync', '--verbose', '--nohooks', '--force',
            '--delete_unversioned_trees']
  for revision in (revisions or []):
    if revision is not None:
      params.extend(['--revision', revision])
  return RunGClient(params, cwd=cwd)
def SetupGitDepot(opts, custom_deps):
  """Sets up the depot for the bisection.

  The depot will be located in a subdirectory called 'bisect'.

  Args:
    opts: The options parsed from the command line through parse_args().
    custom_deps: A dictionary of additional dependencies to add to .gclient.

  Returns:
    True if gclient successfully created the config file and did a sync, False
    otherwise.
  """
  name = 'Setting up Bisection Depot'
  try:
    if opts.output_buildbot_annotations:
      OutputAnnotationStepStart(name)

    # A non-zero return code from gclient config means failure.
    if RunGClientAndCreateConfig(opts, custom_deps):
      return False

    # Clear stale lock files and local modifications before syncing.
    _CleanupPreviousGitRuns()
    RunGClient(['revert'])
    return not RunGClientAndSync()
  finally:
    # Always close the annotation step, even if setup failed.
    if opts.output_buildbot_annotations:
      OutputAnnotationStepClosed()
def CheckIfBisectDepotExists(opts):
  """Checks if the bisect directory already exists.

  Args:
    opts: The options parsed from the command line through parse_args().

  Returns:
    Returns True if it exists.
  """
  src_dir = os.path.join(opts.working_directory, BISECT_DIR, 'src')
  return os.path.exists(src_dir)
def CheckRunGit(command, cwd=None):
  """Runs a git subcommand and asserts that it succeeded.

  Args:
    command: A list containing the args to git.
    cwd: Working directory to run from.

  Returns:
    The output of the git command.
  """
  output, return_code = RunGit(command, cwd=cwd)

  error_message = 'An error occurred while running "git %s"' % ' '.join(command)
  assert not return_code, error_message
  return output
def RunGit(command, cwd=None):
  """Run a git subcommand, returning its output and return code.

  Args:
    command: A list containing the args to git.
    cwd: A directory to change to while running the git command (optional).

  Returns:
    A tuple of the output and return code.
  """
  return RunProcessAndRetrieveOutput(['git'] + command, cwd=cwd)
def CreateBisectDirectoryAndSetupDepot(opts, custom_deps):
  """Sets up a subdirectory 'bisect' and then retrieves a copy of the depot
  there using gclient.

  Args:
    opts: The options parsed from the command line through parse_args().
    custom_deps: A dictionary of additional dependencies to add to .gclient.

  Raises:
    RuntimeError: The bisect directory could not be created, or the source
        checkout failed.
  """
  if CheckIfBisectDepotExists(opts):
    path_to_dir = os.path.join(os.path.abspath(opts.working_directory),
                               BISECT_DIR, 'src')
    output, _ = RunGit(['rev-parse', '--is-inside-work-tree'], cwd=path_to_dir)
    if output.strip() == 'true':
      # Before checking out master, cleanup up any leftover index.lock files.
      _CleanupPreviousGitRuns(path_to_dir)

      # Checks out the master branch, throws an exception if git command fails.
      CheckRunGit(['checkout', '-f', 'master'], cwd=path_to_dir)
  if not _CreateAndChangeToSourceDirectory(opts.working_directory):
    raise RuntimeError('Could not create bisect directory.')

  if not SetupGitDepot(opts, custom_deps):
    raise RuntimeError('Failed to grab source.')
def RunProcess(command):
  """Runs an arbitrary command.

  If output from the call is needed, use RunProcessAndRetrieveOutput instead.

  Args:
    command: A list containing the command and args to execute.

  Returns:
    The return code of the call.
  """
  # On Windows, use shell=True to get PATH interpretation.
  return subprocess.call(command, shell=IsWindowsHost())
def RunProcessAndRetrieveOutput(command, cwd=None):
  """Runs an arbitrary command, returning its output and return code.

  Since output is collected via communicate(), there will be no output until
  the call terminates. If you need output while the program runs (ie. so
  that the buildbot doesn't terminate the script), consider RunProcess().

  Args:
    command: A list containing the command and args to execute.
    cwd: A directory to change to while running the command. The command can be
        relative to this directory. If this is None, the command will be run in
        the current directory.

  Returns:
    A tuple of the output and return code.
  """
  if cwd:
    original_cwd = os.getcwd()
    os.chdir(cwd)

  try:
    # On Windows, use shell=True to get PATH interpretation.
    shell = IsWindowsHost()
    proc = subprocess.Popen(
        command, shell=shell, stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT)
    output, _ = proc.communicate()
  finally:
    # Restore the working directory even if Popen or communicate raises; the
    # old code left the whole process stranded in |cwd| on failure.
    if cwd:
      os.chdir(original_cwd)

  return (output, proc.returncode)
def IsStringInt(string_to_check):
  """Checks whether or not the given string can be converted to an int."""
  try:
    int(string_to_check)
  except ValueError:
    return False
  return True
def IsStringFloat(string_to_check):
  """Checks whether or not the given string can be converted to a float."""
  try:
    float(string_to_check)
  except ValueError:
    return False
  return True
def IsWindowsHost():
  """Returns True when running on a Windows (or cygwin) host."""
  return sys.platform == 'cygwin' or sys.platform.startswith('win')
def Is64BitWindows():
  """Checks whether or not Windows is a 64-bit version.

  Returns:
    Truthy if the host Windows reports a 64-bit processor architecture.
  """
  platform = os.environ.get('PROCESSOR_ARCHITEW6432')
  if not platform:
    # Must not be running in WoW64, so PROCESSOR_ARCHITECTURE is correct.
    platform = os.environ.get('PROCESSOR_ARCHITECTURE')
  # Windows spells Itanium as 'IA64'; keep the historical 'I64' entry too for
  # backward compatibility with the old check.
  return platform and platform in ['AMD64', 'IA64', 'I64']
def IsLinuxHost():
  """Returns True when running on a Linux host."""
  return sys.platform.startswith('linux')
def IsMacHost():
  """Returns True when running on a Mac OS X host."""
  return sys.platform.startswith('darwin')
| bsd-3-clause |
chrisburr/staged-recipes | .ci_support/build_all.py | 5 | 5527 | import conda_build.conda_interface
import networkx as nx
import conda_build.api
from compute_build_graph import construct_graph
import argparse
import os
from collections import OrderedDict
import sys
try:
from ruamel_yaml import safe_load, safe_dump
except ImportError:
from yaml import safe_load, safe_dump
def get_host_platform():
    """Return the conda-style platform name for the current host.

    Maps sys.platform values to 'linux', 'osx' or 'win'; returns None for
    any other platform, exactly like the original if/elif chain.
    """
    from sys import platform
    if platform in ("linux", "linux2"):
        return "linux"
    if platform == "darwin":
        return "osx"
    if platform == "win32":
        return "win"
def build_all(recipes_dir, arch):
    """Build every recipe found under ``recipes_dir`` for ``arch``.

    Recipes declaring a non-standard ``channel_sources`` are built
    individually with their own channels; the remainder are batched by
    compiler era (cf201901 label vs. main).
    """
    folders = os.listdir(recipes_dir)
    old_comp_folders = []
    new_comp_folders = []
    if not folders:
        print("Found no recipes to build")
        return
    for folder in folders:
        built = False
        cbc = os.path.join(recipes_dir, folder, "conda_build_config.yaml")
        if os.path.exists(cbc):
            with open(cbc, "r") as f:
                text = ''.join(f.readlines())
            if 'channel_sources' in text:
                specific_config = safe_load(text)
                if "channel_targets" not in specific_config:
                    raise RuntimeError("channel_targets not found in {}".format(folder))
                if "channel_sources" in specific_config:
                    for row in specific_config["channel_sources"]:
                        channels = [c.strip() for c in row.split(",")]
                        if channels != ['conda-forge', 'defaults'] and \
                                channels != ['conda-forge/label/cf201901', 'defaults']:
                            # Non-standard channels: build this recipe alone.
                            print("Not a standard configuration of channel_sources. Building {} individually.".format(folder))
                            conda_build.api.build([os.path.join(recipes_dir, folder)], config=get_config(arch, channels))
                            built = True
                            break
                if not built:
                    # NOTE(review): the extraction lost indentation here; this
                    # nesting sends every recipe that declares channel_sources
                    # (and wasn't built individually) to the cf201901 batch --
                    # confirm against the upstream file.
                    old_comp_folders.append(folder)
                    continue
        if not built:
            new_comp_folders.append(folder)

    if old_comp_folders:
        print("Building {} with conda-forge/label/cf201901".format(','.join(old_comp_folders)))
        channel_urls = ['local', 'conda-forge/label/cf201901', 'defaults']
        build_folders(recipes_dir, old_comp_folders, arch, channel_urls)
    if new_comp_folders:
        print("Building {} with conda-forge/label/main".format(','.join(new_comp_folders)))
        channel_urls = ['local', 'conda-forge', 'defaults']
        build_folders(recipes_dir, new_comp_folders, arch, channel_urls)
def get_config(arch, channel_urls):
    """Build a conda_build Config for ``arch`` using ``channel_urls``.

    Combines the root conda_build_config.yaml with the per-platform variant
    file from this script's directory, when such a file exists.
    """
    exclusive_config_file = os.path.join(conda_build.conda_interface.root_dir,
                                         'conda_build_config.yaml')
    platform = get_host_platform()
    script_dir = os.path.dirname(os.path.realpath(__file__))
    variant_config_files = []
    variant_config_file = os.path.join(script_dir, '{}{}.yaml'.format(
        platform, arch))
    if os.path.exists(variant_config_file):
        variant_config_files.append(variant_config_file)
    # Overlinking checks are disabled on Windows.
    error_overlinking = (get_host_platform() != "win")
    config = conda_build.api.Config(
        variant_config_files=variant_config_files, arch=arch,
        exclusive_config_file=exclusive_config_file, channel_urls=channel_urls,
        error_overlinking=error_overlinking)
    return config
def build_folders(recipes_dir, folders, arch, channel_urls):
    """Build the given recipe folders in dependency order.

    Args:
        recipes_dir: Directory containing the recipe folders.
        folders: Recipe folder names to build.
        arch: Target architecture string.
        channel_urls: Channels used for dependency resolution and building.
    """
    index_path = os.path.join(sys.exec_prefix, 'conda-bld')
    os.makedirs(index_path, exist_ok=True)
    conda_build.api.update_index(index_path)
    index = conda_build.conda_interface.get_index(channel_urls=channel_urls)
    conda_resolve = conda_build.conda_interface.Resolve(index)

    config = get_config(arch, channel_urls)
    platform = get_host_platform()
    worker = {'platform': platform, 'arch': arch,
              'label': '{}-{}'.format(platform, arch)}

    G = construct_graph(recipes_dir, worker=worker, run='build',
                        conda_resolve=conda_resolve, folders=folders,
                        config=config, finalize=False)
    # Reversed topological order builds dependencies before dependents.
    order = list(nx.topological_sort(G))
    order.reverse()

    print('Computed that there are {} distributions to build from {} recipes'
          .format(len(order), len(folders)))
    if not order:
        print('Nothing to do')
        return
    print("Resolved dependencies, will be built in the following order:")
    print(' '+'\n '.join(order))
    # Deduplicate recipe paths while preserving order (one meta.yaml can
    # produce several distributions).
    d = OrderedDict()
    for node in order:
        # NOTE(review): G.node is the networkx<2.4 API (removed later in 2.x).
        d[G.node[node]['meta'].meta_path] = 1
    for recipe in d.keys():
        conda_build.api.build([recipe], config=get_config(arch, channel_urls))
def check_recipes_in_correct_dir(root_dir, correct_dir):
    """Verify every meta.yaml lives at ``correct_dir/<recipe>/meta.yaml``.

    Raises RuntimeError for any meta.yaml found outside ``correct_dir`` or
    nested at the wrong depth.
    """
    from pathlib import Path
    for meta_path in Path(root_dir).rglob('meta.yaml'):
        path = meta_path.absolute().relative_to(root_dir)
        if path.parts[0] != correct_dir:
            raise RuntimeError(f"recipe {path.parts} in wrong directory")
        if len(path.parts) != 3:
            raise RuntimeError(f"recipe {path.parts} in wrong directory")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--arch', default='64',
help='target architecture (64 or 32)')
args = parser.parse_args()
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
check_recipes_in_correct_dir(root_dir, "recipes")
build_all(os.path.join(root_dir, "recipes"), args.arch)
| bsd-3-clause |
asottile/pushmanager | pushmanager/servlets/undelayrequest.py | 2 | 1123 | import sqlalchemy as SA
import tornado.web
import pushmanager.core.db as db
from pushmanager.core.requesthandler import RequestHandler
import pushmanager.core.util
class UndelayRequestServlet(RequestHandler):
    """Moves one of the current user's push requests from 'delayed' back to
    'requested'."""

    @tornado.web.asynchronous
    def post(self):
        if not self.current_user:
            return self.send_error(403)
        self.requestid = pushmanager.core.util.get_int_arg(self.request, 'id')
        # Only flip the state if the request belongs to the current user and
        # is actually delayed; otherwise the UPDATE matches no rows.
        update_query = db.push_requests.update().where(SA.and_(
            db.push_requests.c.id == self.requestid,
            db.push_requests.c.user == self.current_user,
            db.push_requests.c.state == 'delayed',
        )).values({
            'state': 'requested',
        })
        # Re-read the row so on_db_complete sees the post-update state.
        select_query = db.push_requests.select().where(
            db.push_requests.c.id == self.requestid,
        )
        db.execute_transaction_cb([update_query, select_query], self.on_db_complete)

    # allow both GET and POST
    get = post

    def on_db_complete(self, success, db_results):
        self.check_db_results(success, db_results)
        self.redirect("/requests?user=%s" % self.current_user)
repology/repology | repology/parsers/parsers/slackbuilds.py | 1 | 3834 | # Copyright (C) 2016-2019 Dmitry Marakasov <amdmi3@amdmi3.ru>
#
# This file is part of repology
#
# repology is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# repology is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with repology. If not, see <http://www.gnu.org/licenses/>.
import os
from typing import Dict, Iterable, Optional, Tuple
from repology.logger import Logger
from repology.packagemaker import NameType, PackageFactory, PackageMaker
from repology.parsers import Parser
from repology.parsers.maintainers import extract_maintainers
from repology.transformer import PackageTransformer
def _iter_packages(path: str) -> Iterable[Tuple[str, str]]:
    """Yield (category, package) for every package directory two levels under path.

    Hidden (dot-prefixed) categories and plain files at either level are skipped.
    """
    for cat_name in os.listdir(path):
        if cat_name.startswith('.'):
            continue
        cat_dir = os.path.join(path, cat_name)
        if not os.path.isdir(cat_dir):
            continue
        for pkg_name in os.listdir(cat_dir):
            if os.path.isdir(os.path.join(cat_dir, pkg_name)):
                yield cat_name, pkg_name
def _parse_infofile(path: str) -> Dict[str, str]:
    """Parse a SlackBuilds .info file into a plain dict.

    Files contain KEY="value" assignments; a trailing backslash continues
    the value on the next line, and multi-line values are re-joined with
    single spaces.
    """
    parsed: Dict[str, str] = {}
    with open(path, encoding='utf-8', errors='ignore') as fp:
        current: Optional[str] = None  # key whose value is still being accumulated
        chunks = []
        for raw_line in fp:
            text = raw_line.strip()
            if not text:
                continue
            if current:
                # Continuation line of the previous assignment.
                piece = text
            else:
                # Start of a new KEY="..." assignment.
                current, piece = text.split('=', 1)
                piece = piece.lstrip('"').lstrip()
            if piece.endswith('\\'):
                # Value continues on the following line.
                chunks.append(piece.rstrip('\\').rstrip())
            elif not piece or piece.endswith('"'):
                # Closing quote (or empty tail) terminates the value.
                chunks.append(piece.rstrip('"').rstrip())
                parsed[current] = ' '.join(chunks)
                current = None
                chunks = []
    return parsed
class SlackBuildsParser(Parser):
    """Parser for the SlackBuilds.org recipe tree (<category>/<name>/<name>.info)."""

    def iter_parse(self, path: str, factory: PackageFactory, transformer: PackageTransformer) -> Iterable[PackageMaker]:
        for category, pkgname in _iter_packages(path):
            with factory.begin(category + '/' + pkgname) as pkg:
                infofile_path = os.path.join(path, category, pkgname, pkgname + '.info')
                if not os.path.isfile(infofile_path):
                    pkg.log('.info file does not exist', severity=Logger.ERROR)
                    continue

                pkg.add_categories(category)

                fields = _parse_infofile(infofile_path)

                # The recipe's declared name must agree with its directory name.
                if fields['PRGNAM'] != pkgname:
                    pkg.log(f'PRGNAM "{fields["PRGNAM"]}" != pkgname "{pkgname}"', severity=Logger.ERROR)
                    continue

                pkg.add_name(fields['PRGNAM'], NameType.SLACKBUILDS_NAME)
                pkg.add_name(category + '/' + pkgname, NameType.SLACKBUILDS_FULL_NAME)
                pkg.set_version(fields['VERSION'])
                pkg.add_homepages(fields['HOMEPAGE'])
                pkg.add_maintainers(extract_maintainers(fields['EMAIL']))

                # Plain and x86_64-specific sources; the placeholder markers
                # mean there is nothing to download for that architecture.
                for download_key in ('DOWNLOAD', 'DOWNLOAD_x86_64'):
                    if fields[download_key] not in ('', 'UNSUPPORTED', 'UNTESTED'):
                        pkg.add_downloads(fields[download_key].split())

                yield pkg
| gpl-3.0 |
factorlibre/OCB | addons/account/wizard/account_state_open.py | 341 | 1785 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class account_state_open(osv.osv_memory):
    """Transient wizard that re-opens the invoice selected in the client context."""
    _name = 'account.state.open'
    _description = 'Account State Open'

    def change_inv_state(self, cr, uid, ids, context=None):
        """Fire the 'open_test' workflow signal on the first active invoice.

        Refuses to act on an already reconciled invoice; always closes the
        wizard window afterwards.
        """
        if context is None:
            context = {}
        invoice_model = self.pool.get('account.invoice')
        # The client passes the selected invoice ids via the context.
        active_ids = context.get('active_ids')
        if isinstance(active_ids, list):
            record = invoice_model.browse(cr, uid, active_ids[0], context=context)
            if record.reconciled:
                raise osv.except_osv(_('Warning!'), _('Invoice is already reconciled.'))
            record.signal_workflow('open_test')
        return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
shurain/autokey | src/lib/macro.py | 47 | 4078 | from iomediator import KEY_SPLIT_RE, Key
import common
# Toolkit-specific setup: under KDE/Qt we need a KAction subclass to hook
# macro menu entries; under GTK the menu items are built inline elsewhere.
if common.USING_QT:
    from PyKDE4.kdecore import ki18n
    from PyKDE4.kdeui import KMenu, KAction
    from PyQt4.QtCore import SIGNAL

    # Use KDE's i18n wrapper as the translation function in the Qt branch.
    _ = ki18n

    class MacroAction(KAction):
        """Menu action that invokes `callback(macro)` when triggered."""

        def __init__(self, menu, macro, callback):
            KAction.__init__(self, macro.TITLE.toString(), menu)
            self.macro = macro
            self.callback = callback
            self.connect(self, SIGNAL("triggered()"), self.on_triggered)

        def on_triggered(self):
            self.callback(self.macro)
else:
    # NOTE(review): the GTK branch defines no `_` here — the TITLE/ARGS class
    # attributes below appear to rely on a gettext `_` installed in builtins;
    # confirm before refactoring.
    from gi.repository import Gtk
class MacroManager:
    """Owns the set of available macros and applies them to phrase expansions."""

    def __init__(self, engine):
        # Order matters only for menu display; each macro is independent.
        self.macros = [
            ScriptMacro(engine),
            DateMacro(),
            FileContentsMacro(),
            CursorMacro(),
        ]

    def get_menu(self, callback, menu=None):
        """Populate (Qt) or build (GTK) a menu with one entry per macro."""
        if common.USING_QT:
            # Qt: the caller supplies the KMenu to fill.
            for entry in self.macros:
                menu.addAction(MacroAction(menu, entry, callback))
        else:
            # GTK: build a fresh menu from scratch.
            menu = Gtk.Menu()
            for entry in self.macros:
                item = Gtk.MenuItem(entry.TITLE)
                item.connect("activate", callback, entry)
                menu.append(item)
            menu.show_all()
        return menu

    def process_expansion(self, expansion):
        """Split the expansion text on key tokens and let every macro rewrite its tokens."""
        tokens = KEY_SPLIT_RE.split(expansion.string)
        for entry in self.macros:
            entry.process(tokens)
        expansion.string = ''.join(tokens)
class AbstractMacro:
    """Base class for phrase macros.

    Subclasses define ID (the token name), TITLE (menu label) and ARGS
    (a list of (arg_name, description) pairs), plus do_process(), which
    rewrites the matching token in place.
    """

    def get_token(self):
        """Return the template token for this macro, e.g. '<script name= args=>'."""
        if not self.ARGS:
            return "<%s>" % self.ID
        arg_stubs = " ".join("%s=" % arg_name for arg_name, _desc in self.ARGS)
        return "<%s %s>" % (self.ID, arg_stubs)

    def _can_process(self, token):
        """True when token is a macro token whose name matches this macro's ID."""
        if KEY_SPLIT_RE.match(token):
            return token[1:-1].split(' ', 1)[0] == self.ID
        return False

    def _get_args(self, token):
        """Parse 'key=value' pairs out of a macro token into a dict.

        Raises when any argument declared in ARGS is missing.
        """
        fields = token[:-1].split(' ')
        args = {}
        for field in fields[1:]:
            arg_name, arg_value = field.split('=', 1)
            args[arg_name] = arg_value
        for arg_name, _desc in self.ARGS:
            if arg_name not in args:
                raise Exception("Missing mandatory argument '%s' for macro '%s'" % (arg_name, self.ID))
        return args

    def process(self, parts):
        """Expand every occurrence of this macro's token inside parts (in place)."""
        for index in xrange(len(parts)):
            if self._can_process(parts[index]):
                self.do_process(parts, index)
class CursorMacro(AbstractMacro):
    """Macro that repositions the text cursor inside the expanded phrase.

    After expansion the cursor sits at the end of the inserted text; this
    macro appends enough LEFT keypresses to move it back to where the
    <cursor> token appeared.
    """

    ID = "cursor"
    TITLE = _("Position cursor")
    ARGS = []

    def do_process(self, parts, i):
        try:
            # Number of characters that will follow the cursor position.
            lefts = len(''.join(parts[i+1:]))
            # Emit that many LEFT keypresses after the whole expansion.
            parts.append(Key.LEFT * lefts)
            parts[i] = ''
        except IndexError:
            # NOTE(review): none of the operations above appear able to raise
            # IndexError for a valid index i — this looks purely defensive.
            pass
class ScriptMacro(AbstractMacro):
    """Macro that runs a user script and substitutes its return value."""

    ID = "script"
    TITLE = _("Run script")
    ARGS = [("name", _("Name")),
            ("args", _("Arguments (comma separated)"))]

    def __init__(self, engine):
        # Scripting engine used to execute scripts and fetch their results.
        self.engine = engine

    def do_process(self, parts, i):
        args = self._get_args(parts[i])
        # Run the named script synchronously, then replace the token with
        # whatever value the script stored as its return value.
        self.engine.run_script_from_macro(args)
        parts[i] = self.engine.get_return_value()
class DateMacro(AbstractMacro):
    """Macro that inserts the current date/time.

    Token form: <date format=...> where the format is a strftime() pattern.
    """

    ID = "date"
    TITLE = _("Insert date")
    ARGS = [("format", _("Format"))]

    def do_process(self, parts, i):
        """Replace parts[i] with the current time rendered via strftime."""
        # Fix: this module never imported `datetime`, so the macro raised
        # NameError at runtime; import it locally here.
        import datetime

        format = self._get_args(parts[i])["format"]
        date = datetime.datetime.now().strftime(format)
        parts[i] = date
class FileContentsMacro(AbstractMacro):
    """Macro that replaces its token with the contents of a named file."""

    ID = "file"
    TITLE = _("Insert file contents")
    ARGS = [("name", _("File name"))]

    def do_process(self, parts, i):
        """Read the file given by the 'name' argument and splice its text into parts[i]."""
        file_path = self._get_args(parts[i])["name"]
        with open(file_path, "r") as handle:
            parts[i] = handle.read()
| gpl-3.0 |
Comcast/neutron | neutron/plugins/hyperv/agent/utilsfactory.py | 9 | 2115 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Cloudbase Solutions SRL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Claudiu Belu, Cloudbase Solutions Srl
import sys
from oslo.config import cfg
from neutron.openstack.common import log as logging
from neutron.plugins.hyperv.agent import utils
from neutron.plugins.hyperv.agent import utilsv2
# Check needed for unit testing on Unix
if sys.platform == 'win32':
import wmi
hyper_opts = [
cfg.BoolOpt('force_hyperv_utils_v1',
default=False,
help=_('Force V1 WMI utility classes')),
]
CONF = cfg.CONF
CONF.register_opts(hyper_opts, 'hyperv')
LOG = logging.getLogger(__name__)
def _get_windows_version():
    """Return the host OS version string (e.g. '6.2.9200') queried via WMI."""
    return wmi.WMI(moniker='//./root/cimv2').Win32_OperatingSystem()[0].Version
def _check_min_windows_version(major, minor, build=0):
    """Check whether the host OS version is at least major.minor.build.

    NOTE(review): relies on Python 2 semantics — map() returns a list there,
    and list >= list compares element-wise. Under Python 3 this comparison
    would raise; confirm before porting.
    """
    version_str = _get_windows_version()
    return map(int, version_str.split('.')) >= [major, minor, build]
def _get_class(v1_class, v2_class, force_v1_flag):
    """Pick the V1 or V2 WMI utility class.

    V2 classes are supported starting from Hyper-V Server 2012 and Windows
    Server 2012 (kernel version 6.2); force_v1_flag pins V1 regardless of
    the detected OS version.
    """
    use_v2 = not force_v1_flag and _check_min_windows_version(6, 2)
    cls = v2_class if use_v2 else v1_class
    LOG.debug(_("Loading class: %(module_name)s.%(class_name)s"),
              {'module_name': cls.__module__, 'class_name': cls.__name__})
    return cls
def get_hypervutils():
    """Instantiate the HyperV utils implementation matching the host OS/config."""
    return _get_class(utils.HyperVUtils, utilsv2.HyperVUtilsV2,
                      CONF.hyperv.force_hyperv_utils_v1)()
| apache-2.0 |
danalec/dotfiles | sublime/.config/sublime-text-3/Packages/Anaconda/anaconda_server/handlers/jedi_handler.py | 2 | 2623 | # -*- coding: utf8 -*-
# Copyright (C) 2014 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import logging
import jedi
from lib.anaconda_handler import AnacondaHandler
from jedi import refactoring as jedi_refactor
from commands import Doc, Goto, GotoAssignment, Rename, FindUsages
from commands import CompleteParameters, AutoComplete
logger = logging.getLogger('')
class JediHandler(AnacondaHandler):
    """Handle requests to execute Jedi related commands to the JsonServer

    The JsonServer instantiate an object of this class passing the method
    to execute as it came from the Sublime Text 3 Anaconda plugin
    """

    def run(self):
        """Call the specific method (override base class)
        """
        # Swap in a wrapper callback so jedi's in-memory caches are purged
        # after every request, keeping the long-lived server process lean.
        self.real_callback = self.callback
        self.callback = self.handle_result_and_purge_cache
        super(JediHandler, self).run()

    def handle_result_and_purge_cache(self, result):
        """Handle the result from the call and purge in memory jedi cache
        """
        try:
            jedi.cache.clear_time_caches()
        except AttributeError:
            # Older jedi releases only expose clear_caches(). The previous
            # bare `except:` also masked unrelated failures (including
            # KeyboardInterrupt), so catch only the expected error.
            jedi.cache.clear_caches()

        self.real_callback(result)

    @property
    def script(self):
        """Generates a new valid Jedi Script and return it back
        """
        return self.jedi_script(**self.data)

    def jedi_script(
            self, source, line, offset, filename='', encoding='utf8', **kw):
        """Generate an usable Jedi Script
        """
        return jedi.Script(source, int(line), int(offset), filename, encoding)

    def rename(self, directories, new_word):
        """Rename the object under the cursor by the given word
        """
        Rename(
            self.callback, self.uid, self.script,
            directories, new_word, jedi_refactor
        )

    def autocomplete(self):
        """Call autocomplete
        """
        AutoComplete(self.callback, self.uid, self.script)

    def parameters(self, settings):
        """Call complete parameter
        """
        CompleteParameters(self.callback, self.uid, self.script, settings)

    def usages(self):
        """Call find usages
        """
        FindUsages(self.callback, self.uid, self.script)

    def goto(self):
        """Call goto
        """
        Goto(self.callback, self.uid, self.script)

    def goto_assignment(self):
        """Call goto_assignment
        """
        GotoAssignment(self.callback, self.uid, self.script)

    def doc(self, html=False):
        """Call doc
        """
        Doc(self.callback, self.uid, self.script, html)
| mit |
pschmitt/home-assistant | homeassistant/components/ffmpeg/camera.py | 10 | 2737 | """Support for Cameras with FFmpeg as decoder."""
import asyncio
import logging
from haffmpeg.camera import CameraMjpeg
from haffmpeg.tools import IMAGE_JPEG, ImageFrame
import voluptuous as vol
from homeassistant.components.camera import PLATFORM_SCHEMA, SUPPORT_STREAM, Camera
from homeassistant.const import CONF_NAME
from homeassistant.helpers.aiohttp_client import async_aiohttp_proxy_stream
import homeassistant.helpers.config_validation as cv
from . import CONF_EXTRA_ARGUMENTS, CONF_INPUT, DATA_FFMPEG
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "FFmpeg"
DEFAULT_ARGUMENTS = "-pred 1"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_INPUT): cv.string,
vol.Optional(CONF_EXTRA_ARGUMENTS, default=DEFAULT_ARGUMENTS): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up a FFmpeg camera."""
    # One camera entity per YAML platform entry.
    async_add_entities([FFmpegCamera(hass, config)])
class FFmpegCamera(Camera):
    """An implementation of an FFmpeg camera.

    Wraps an arbitrary ffmpeg input (configured via CONF_INPUT) as a Home
    Assistant camera entity with still-image and MJPEG-stream support.
    """

    def __init__(self, hass, config):
        """Initialize a FFmpeg camera."""
        super().__init__()

        self._manager = hass.data[DATA_FFMPEG]
        self._name = config.get(CONF_NAME)
        # Full ffmpeg input specification; may include extra ffmpeg flags
        # in front of the actual stream URL.
        self._input = config.get(CONF_INPUT)
        self._extra_arguments = config.get(CONF_EXTRA_ARGUMENTS)

    @property
    def supported_features(self):
        """Return supported features."""
        return SUPPORT_STREAM

    async def stream_source(self):
        """Return the stream source.

        NOTE(review): assumes the stream URL is the last whitespace-separated
        token of the configured input — confirm for unusual input strings.
        """
        return self._input.split(" ")[-1]

    async def async_camera_image(self):
        """Return a still image response from the camera."""
        ffmpeg = ImageFrame(self._manager.binary, loop=self.hass.loop)

        # shield() lets the ffmpeg grab run to completion even if the
        # awaiting caller is cancelled (e.g. by a request timeout).
        image = await asyncio.shield(
            ffmpeg.get_image(
                self._input, output_format=IMAGE_JPEG, extra_cmd=self._extra_arguments
            )
        )
        return image

    async def handle_async_mjpeg_stream(self, request):
        """Generate an HTTP MJPEG stream from the camera."""
        stream = CameraMjpeg(self._manager.binary, loop=self.hass.loop)
        await stream.open_camera(self._input, extra_cmd=self._extra_arguments)

        try:
            stream_reader = await stream.get_reader()
            return await async_aiohttp_proxy_stream(
                self.hass,
                request,
                stream_reader,
                self._manager.ffmpeg_stream_content_type,
            )
        finally:
            # Always shut the ffmpeg subprocess down, even if proxying fails.
            await stream.close()

    @property
    def name(self):
        """Return the name of this camera."""
        return self._name
| apache-2.0 |
Zeken/audacity | lib-src/lv2/lv2/plugins/eg-amp.lv2/waflib/Tools/c_tests.py | 330 | 4280 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
from waflib import Task
from waflib.Configure import conf
from waflib.TaskGen import feature,before_method,after_method
import sys
LIB_CODE='''
#ifdef _MSC_VER
#define testEXPORT __declspec(dllexport)
#else
#define testEXPORT
#endif
testEXPORT int lib_func(void) { return 9; }
'''
MAIN_CODE='''
#ifdef _MSC_VER
#define testEXPORT __declspec(dllimport)
#else
#define testEXPORT
#endif
testEXPORT int lib_func(void);
int main(int argc, char **argv) {
(void)argc; (void)argv;
return !(lib_func() == 9);
}
'''
@feature('link_lib_test')
@before_method('process_source')
def link_lib_test_fun(self):
	# Build a tiny shared library plus a program linked against it, to
	# prove the toolchain can both produce and consume shared libraries.
	def write_test_file(task):
		# Dump the configured code fragment into the generated source file.
		task.outputs[0].write(task.generator.code)
	rpath=[]
	if getattr(self,'add_rpath',False):
		rpath=[self.bld.path.get_bld().abspath()]
	mode=self.mode
	m='%s %s'%(mode,mode)
	# Optionally run the resulting program, not just link it.
	ex=self.test_exec and'test_exec'or''
	bld=self.bld
	bld(rule=write_test_file,target='test.'+mode,code=LIB_CODE)
	bld(rule=write_test_file,target='main.'+mode,code=MAIN_CODE)
	bld(features='%sshlib'%m,source='test.'+mode,target='test')
	bld(features='%sprogram %s'%(m,ex),source='main.'+mode,target='app',use='test',rpath=rpath)
@conf
def check_library(self,mode=None,test_exec=True):
	# Verify that building and linking against a shared library works for
	# the detected toolchain; defaults to C++ when a C++ compiler is set.
	if not mode:
		mode='c'
		if self.env.CXX:
			mode='cxx'
	self.check(compile_filename=[],features='link_lib_test',msg='Checking for libraries',mode=mode,test_exec=test_exec,)
INLINE_CODE='''
typedef int foo_t;
static %s foo_t static_foo () {return 0; }
%s foo_t foo () {
return 0;
}
'''
INLINE_VALUES=['inline','__inline__','__inline']
@conf
def check_inline(self,**kw):
	# Probe which inline keyword the compiler accepts ('inline',
	# '__inline__' or '__inline') and define a matching macro.
	self.start_msg('Checking for inline')
	if not'define_name'in kw:
		kw['define_name']='INLINE_MACRO'
	if not'features'in kw:
		if self.env.CXX:
			kw['features']=['cxx']
		else:
			kw['features']=['c']
	for x in INLINE_VALUES:
		kw['fragment']=INLINE_CODE%(x,x)
		try:
			self.check(**kw)
		except self.errors.ConfigurationError:
			continue
		else:
			self.end_msg(x)
			if x!='inline':
				# Map the plain 'inline' keyword onto the accepted variant.
				self.define('inline',x,quote=False)
			return x
	self.fatal('could not use inline functions')
LARGE_FRAGMENT='''#include <unistd.h>
int main(int argc, char **argv) {
(void)argc; (void)argv;
return !(sizeof(off_t) >= 8);
}
'''
@conf
def check_large_file(self,**kw):
	# Check for a 64-bit off_t, first natively and then with
	# -D_FILE_OFFSET_BITS=64 (the usual LFS switch on 32-bit Unix).
	if not'define_name'in kw:
		kw['define_name']='HAVE_LARGEFILE'
	if not'execute'in kw:
		kw['execute']=True
	if not'features'in kw:
		if self.env.CXX:
			kw['features']=['cxx','cxxprogram']
		else:
			kw['features']=['c','cprogram']
	kw['fragment']=LARGE_FRAGMENT
	kw['msg']='Checking for large file support'
	ret=True
	try:
		if self.env.DEST_BINFMT!='pe':
			# PE targets (Windows) do not use the POSIX LFS machinery.
			ret=self.check(**kw)
	except self.errors.ConfigurationError:
		pass
	else:
		if ret:
			return True
	kw['msg']='Checking for -D_FILE_OFFSET_BITS=64'
	kw['defines']=['_FILE_OFFSET_BITS=64']
	try:
		ret=self.check(**kw)
	except self.errors.ConfigurationError:
		pass
	else:
		self.define('_FILE_OFFSET_BITS',64)
		return ret
	self.fatal('There is no support for large files')
ENDIAN_FRAGMENT='''
short int ascii_mm[] = { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 };
short int ascii_ii[] = { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 };
int use_ascii (int i) {
return ascii_mm[i] + ascii_ii[i];
}
short int ebcdic_ii[] = { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 };
short int ebcdic_mm[] = { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 };
int use_ebcdic (int i) {
return ebcdic_mm[i] + ebcdic_ii[i];
}
extern int foo;
'''
class grep_for_endianness(Task.Task):
	"""Scan the compiled object file for the marker string embedded by
	ENDIAN_FRAGMENT to determine the target's byte order."""
	color='PINK'
	def run(self):
		# Object files are binary; latin-1 maps every byte to a character,
		# so the ASCII markers survive the decode.
		data=self.inputs[0].read(flags='rb').decode('iso8859-1')
		if 'LiTTleEnDian' in data:
			self.generator.tmp.append('little')
		elif 'BIGenDianSyS' in data:
			self.generator.tmp.append('big')
		else:
			# Neither marker found: report failure to the build system.
			return -1
@feature('grep_for_endianness')
@after_method('process_source')
def grep_for_endianness_fun(self):
	# Attach the marker-scanning task to the freshly compiled object file.
	self.create_task('grep_for_endianness',self.compiled_tasks[0].outputs[0])
@conf
def check_endianness(self):
	# Compile ENDIAN_FRAGMENT and grep the object file for marker strings;
	# this works without executing target code, so it is cross-compile safe.
	tmp=[]
	def check_msg(self):
		return tmp[0]
	self.check(fragment=ENDIAN_FRAGMENT,features='c grep_for_endianness',msg="Checking for endianness",define='ENDIANNESS',tmp=tmp,okmsg=check_msg)
	return tmp[0]
| gpl-2.0 |
eoncloud-dev/eonboard | eoncloud_web/biz/volume/serializer.py | 4 | 1116 | #-*-coding-utf-8-*-
from django.contrib.auth.models import User
from rest_framework import serializers
from biz.idc.models import UserDataCenter
from biz.volume.models import Volume
from biz.instance.serializer import InstanceSerializer
class VolumeSerializer(serializers.ModelSerializer):
    """Serializer for Volume records.

    The owning user and data center are never trusted from the payload:
    both validate_* hooks discard the submitted value and derive it from
    the authenticated request/session instead.
    """
    user = serializers.PrimaryKeyRelatedField(queryset=User.objects.all(), required=False, allow_null=True, default=None)
    user_data_center = serializers.PrimaryKeyRelatedField(queryset=UserDataCenter.objects.all(), required=False, allow_null=True, default=None)
    # Read-only nested view of the attached instance, if any.
    instance_info = InstanceSerializer(source="instance", required=False, allow_null=True, default=None, read_only=True)
    create_date = serializers.DateTimeField(format="%Y-%m-%d %H:%M", required=False, allow_null=True)

    def validate_user(self, value):
        # Ignore the client-supplied value; the owner is always the caller.
        request = self.context.get('request', None)
        return request.user

    def validate_user_data_center(self, value):
        # Ignore the client-supplied value; resolve from the session's UDC id.
        request = self.context.get('request', None)
        return UserDataCenter.objects.get(pk=request.session["UDC_ID"])

    class Meta:
        model = Volume
| apache-2.0 |
ryfeus/lambda-packs | Lxml_requests/source/requests/packages/chardet/jpcntx.py | 1777 | 19348 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .compat import wrap_ord
NUM_OF_CATEGORY = 6
DONT_KNOW = -1
ENOUGH_REL_THRESHOLD = 100
MAX_REL_THRESHOLD = 1000
MINIMUM_DATA_THRESHOLD = 4
# This is hiragana 2-char sequence table, the number in each cell represents its frequency category
jp2CharContext = (
(0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1),
(2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4),
(0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2),
(0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4),
(1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4),
(0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3),
(0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3),
(0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3),
(0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4),
(0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3),
(2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4),
(0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3),
(0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5),
(0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3),
(2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5),
(0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4),
(1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4),
(0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3),
(0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3),
(0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3),
(0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5),
(0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4),
(0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5),
(0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3),
(0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4),
(0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4),
(0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4),
(0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1),
(0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0),
(1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3),
(0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0),
(0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3),
(0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3),
(0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5),
(0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4),
(2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5),
(0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3),
(0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3),
(0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3),
(0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3),
(0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4),
(0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4),
(0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2),
(0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3),
(0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3),
(0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3),
(0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3),
(0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4),
(0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3),
(0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4),
(0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3),
(0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3),
(0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4),
(0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4),
(0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3),
(2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4),
(0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4),
(0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3),
(0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4),
(0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4),
(1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4),
(0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3),
(0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2),
(0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2),
(0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3),
(0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3),
(0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5),
(0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3),
(0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4),
(1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4),
(0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1),
(0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2),
(0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3),
(0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1),
)
class JapaneseContextAnalysis:
    """Score hiragana-pair frequencies to help decide if text is Japanese.

    Subclasses map an encoding-specific byte pair to a hiragana "order" via
    get_order(); jp2CharContext (defined above) assigns each ordered pair of
    consecutive hiragana to a relevance category. The category counters are
    later turned into a confidence value.

    NOTE(review): NUM_OF_CATEGORY, MAX_REL_THRESHOLD, ENOUGH_REL_THRESHOLD,
    MINIMUM_DATA_THRESHOLD and DONT_KNOW are module-level constants defined
    above this chunk — verify their values there.
    """
    def __init__(self):
        self.reset()
    def reset(self):
        self._mTotalRel = 0 # total sequence received
        # category counters, each integer counts sequences in its category
        self._mRelSample = [0] * NUM_OF_CATEGORY
        # if last byte in current buffer is not the last byte of a character,
        # we need to know how many bytes to skip in next buffer
        self._mNeedToSkipCharNum = 0
        self._mLastCharOrder = -1 # The order of previous char
        # If this flag is set to True, detection is done and conclusion has
        # been made
        self._mDone = False
    def feed(self, aBuf, aLen):
        # Consume aLen bytes of aBuf, updating the pair-category counters.
        if self._mDone:
            return
        # The buffer we got is byte oriented, and a character may span more
        # than one buffer. In case the last one or two bytes of the previous
        # buffer were incomplete, we recorded how many bytes are needed to
        # complete that character, and skip them here. We could instead keep
        # those bytes and analyse the character once complete, but a single
        # character makes little difference; skipping it keeps the logic
        # simple and improves performance.
        i = self._mNeedToSkipCharNum
        while i < aLen:
            order, charLen = self.get_order(aBuf[i:i + 2])
            i += charLen
            if i > aLen:
                # Character straddles the buffer boundary: remember how many
                # of its bytes to skip next time, and break the pair chain.
                self._mNeedToSkipCharNum = i - aLen
                self._mLastCharOrder = -1
            else:
                if (order != -1) and (self._mLastCharOrder != -1):
                    self._mTotalRel += 1
                    if self._mTotalRel > MAX_REL_THRESHOLD:
                        # Enough evidence gathered; stop analysing.
                        self._mDone = True
                        break
                    self._mRelSample[jp2CharContext[self._mLastCharOrder][order]] += 1
                self._mLastCharOrder = order
    def got_enough_data(self):
        # True once enough hiragana pairs were seen for a reliable verdict.
        return self._mTotalRel > ENOUGH_REL_THRESHOLD
    def get_confidence(self):
        # This is just one way to calculate confidence. It works well for me.
        # Category 0 holds "irrelevant" pairs, so confidence is the share of
        # relevant pairs among all pairs seen.
        if self._mTotalRel > MINIMUM_DATA_THRESHOLD:
            return (self._mTotalRel - self._mRelSample[0]) / self._mTotalRel
        else:
            return DONT_KNOW
    def get_order(self, aBuf):
        # Base implementation: nothing recognized, advance one byte.
        # Encoding-specific subclasses (SJIS / EUC-JP) override this.
        return -1, 1
class SJISContextAnalysis(JapaneseContextAnalysis):
    """Shift_JIS flavour of the Japanese context analysis.

    Also tracks the charset name: lead bytes that are only legal in
    Microsoft's CP932 superset upgrade the reported charset from
    SHIFT_JIS to CP932.
    """
    def __init__(self):
        # BUG FIX: the original never initialized the base analyser, so
        # _mDone / _mRelSample / _mLastCharOrder etc. did not exist until an
        # external reset() call; feed() on a fresh instance raised
        # AttributeError. Explicit base call (old-style class, no super()).
        JapaneseContextAnalysis.__init__(self)
        self.charset_name = "SHIFT_JIS"

    def get_charset_name(self):
        return self.charset_name

    def get_order(self, aBuf):
        """Return (hiragana order, byte length) for the char starting aBuf."""
        if not aBuf:
            return -1, 1
        # find out current char's byte length
        first_char = wrap_ord(aBuf[0])
        if ((0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC)):
            charLen = 2
            if (first_char == 0x87) or (0xFA <= first_char <= 0xFC):
                # These lead bytes only occur in the CP932 superset.
                self.charset_name = "CP932"
        else:
            charLen = 1
        # return its order if it is hiragana
        if len(aBuf) > 1:
            second_char = wrap_ord(aBuf[1])
            if (first_char == 202) and (0x9F <= second_char <= 0xF1):
                return second_char - 0x9F, charLen
        return -1, charLen
class EUCJPContextAnalysis(JapaneseContextAnalysis):
    """EUC-JP flavour: maps an EUC-JP byte pair to a hiragana order."""

    def get_order(self, aBuf):
        if not aBuf:
            return -1, 1
        # Determine the byte length of the character starting here.
        lead = wrap_ord(aBuf[0])
        if lead == 0x8F:
            char_len = 3
        elif lead == 0x8E or 0xA1 <= lead <= 0xFE:
            char_len = 2
        else:
            char_len = 1
        # Hiragana live in row 0xA4; report the position within the row.
        if len(aBuf) > 1:
            trail = wrap_ord(aBuf[1])
            if lead == 0xA4 and 0xA1 <= trail <= 0xF3:
                return trail - 0xA1, char_len
        return -1, char_len
# flake8: noqa
| mit |
google/material-design-icons | update/venv/lib/python3.9/site-packages/pip/_internal/network/download.py | 6 | 6243 | """Download files with progress indicators.
"""
import cgi
import logging
import mimetypes
import os
from typing import Iterable, Optional, Tuple
from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response
from pip._internal.cli.progress_bars import DownloadProgressProvider
from pip._internal.exceptions import NetworkConnectionError
from pip._internal.models.index import PyPI
from pip._internal.models.link import Link
from pip._internal.network.cache import is_from_cache
from pip._internal.network.session import PipSession
from pip._internal.network.utils import HEADERS, raise_for_status, response_chunks
from pip._internal.utils.misc import format_size, redact_auth_from_url, splitext
logger = logging.getLogger(__name__)
def _get_http_response_size(resp):
# type: (Response) -> Optional[int]
try:
return int(resp.headers['content-length'])
except (ValueError, KeyError, TypeError):
return None
def _prepare_download(
    resp,  # type: Response
    link,  # type: Link
    progress_bar  # type: str
):
    # type: (...) -> Iterable[bytes]
    """Log the download start and wrap the body chunks in a progress bar
    when it is worth showing one."""
    total_length = _get_http_response_size(resp)

    # PyPI file-storage URLs carry a hash fragment; show the tidier form.
    if link.netloc == PyPI.file_storage_domain:
        url = link.show_url
    else:
        url = link.url_without_fragment

    logged_url = redact_auth_from_url(url)
    if total_length:
        logged_url = '{} ({})'.format(logged_url, format_size(total_length))

    cached = is_from_cache(resp)
    logger.info("Using cached %s" if cached else "Downloading %s", logged_url)

    if logger.getEffectiveLevel() > logging.INFO or cached:
        show_progress = False
    else:
        # Progress bar for unknown sizes and anything over 40 KB.
        show_progress = not total_length or total_length > (40 * 1000)

    chunks = response_chunks(resp, CONTENT_CHUNK_SIZE)
    if show_progress:
        return DownloadProgressProvider(progress_bar, max=total_length)(chunks)
    return chunks
def sanitize_content_filename(filename):
    # type: (str) -> str
    """Strip any directory components from a Content-Disposition filename.

    Keeping only the final path component blocks directory traversal via
    names such as "../../evil".
    """
    _directory, base_name = os.path.split(filename)
    return base_name
def parse_content_disposition(content_disposition, default_filename):
    # type: (str, str) -> str
    """
    Extract a safe "filename" value from a Content-Disposition header,
    falling back to *default_filename* when the header carries none (or
    when sanitizing leaves nothing usable).
    """
    _disposition_type, params = cgi.parse_header(content_disposition)
    filename = params.get('filename')
    if not filename:
        return default_filename
    # Sanitize to block directory traversal via ".." path parts.
    return sanitize_content_filename(filename) or default_filename
def _get_http_response_filename(resp, link):
    # type: (Response, Link) -> str
    """Get an ideal filename from the given HTTP response, falling back to
    the link filename if not provided.

    Extension fallback order matters: Content-Disposition name first, then
    a Content-Type guess, then (still extension-less) the redirect target's
    extension.
    """
    filename = link.filename  # fallback
    # Have a look at the Content-Disposition header for a better guess
    content_disposition = resp.headers.get('content-disposition')
    if content_disposition:
        filename = parse_content_disposition(content_disposition, filename)
    # NOTE(review): splitext here is pip's util (keeps e.g. ".tar.gz"
    # together), not os.path.splitext — confirm against the import above.
    ext = splitext(filename)[1]  # type: Optional[str]
    if not ext:
        # No extension yet: try to derive one from the Content-Type header.
        ext = mimetypes.guess_extension(
            resp.headers.get('content-type', '')
        )
        if ext:
            filename += ext
    if not ext and link.url != resp.url:
        # Redirected and still extension-less: borrow the final URL's ext.
        ext = os.path.splitext(resp.url)[1]
        if ext:
            filename += ext
    return filename
def _http_get_download(session, link):
    # type: (PipSession, Link) -> Response
    """GET the link's URL (fragment stripped) as a stream, raising on HTTP errors."""
    target_url, _sep, _fragment = link.url.partition('#')
    resp = session.get(target_url, headers=HEADERS, stream=True)
    raise_for_status(resp)
    return resp
class Downloader:
    """Download a single link into a directory, reporting progress."""

    def __init__(
        self,
        session,  # type: PipSession
        progress_bar,  # type: str
    ):
        # type: (...) -> None
        self._session = session
        self._progress_bar = progress_bar

    def __call__(self, link, location):
        # type: (Link, str) -> Tuple[str, str]
        """Download the file given by link into location."""
        try:
            resp = _http_get_download(self._session, link)
        except NetworkConnectionError as e:
            assert e.response is not None
            logger.critical(
                "HTTP error %s while getting %s", e.response.status_code, link
            )
            raise

        filepath = os.path.join(
            location, _get_http_response_filename(resp, link)
        )
        with open(filepath, 'wb') as content_file:
            for chunk in _prepare_download(resp, link, self._progress_bar):
                content_file.write(chunk)
        return filepath, resp.headers.get('Content-Type', '')
class BatchDownloader:
    """Download several links into a directory, yielding per-link results."""

    def __init__(
        self,
        session,  # type: PipSession
        progress_bar,  # type: str
    ):
        # type: (...) -> None
        self._session = session
        self._progress_bar = progress_bar

    def __call__(self, links, location):
        # type: (Iterable[Link], str) -> Iterable[Tuple[Link, Tuple[str, str]]]
        """Download the files given by links into location."""
        for link in links:
            yield link, self._download_one(link, location)

    def _download_one(self, link, location):
        # type: (Link, str) -> Tuple[str, str]
        # Fetch a single link; mirrors Downloader.__call__.
        try:
            resp = _http_get_download(self._session, link)
        except NetworkConnectionError as e:
            assert e.response is not None
            logger.critical(
                "HTTP error %s while getting %s",
                e.response.status_code, link,
            )
            raise

        filepath = os.path.join(
            location, _get_http_response_filename(resp, link)
        )
        with open(filepath, 'wb') as content_file:
            for chunk in _prepare_download(resp, link, self._progress_bar):
                content_file.write(chunk)
        return filepath, resp.headers.get('Content-Type', '')
| apache-2.0 |
HM2MC/Webfront | reportlab-2.5/src/reportlab/pdfbase/_fontdata_widths_courierbold.py | 224 | 3664 | widths = {'A': 600,
'AE': 600,
'Aacute': 600,
'Acircumflex': 600,
'Adieresis': 600,
'Agrave': 600,
'Aring': 600,
'Atilde': 600,
'B': 600,
'C': 600,
'Ccedilla': 600,
'D': 600,
'E': 600,
'Eacute': 600,
'Ecircumflex': 600,
'Edieresis': 600,
'Egrave': 600,
'Eth': 600,
'Euro': 600,
'F': 600,
'G': 600,
'H': 600,
'I': 600,
'Iacute': 600,
'Icircumflex': 600,
'Idieresis': 600,
'Igrave': 600,
'J': 600,
'K': 600,
'L': 600,
'Lslash': 600,
'M': 600,
'N': 600,
'Ntilde': 600,
'O': 600,
'OE': 600,
'Oacute': 600,
'Ocircumflex': 600,
'Odieresis': 600,
'Ograve': 600,
'Oslash': 600,
'Otilde': 600,
'P': 600,
'Q': 600,
'R': 600,
'S': 600,
'Scaron': 600,
'T': 600,
'Thorn': 600,
'U': 600,
'Uacute': 600,
'Ucircumflex': 600,
'Udieresis': 600,
'Ugrave': 600,
'V': 600,
'W': 600,
'X': 600,
'Y': 600,
'Yacute': 600,
'Ydieresis': 600,
'Z': 600,
'Zcaron': 600,
'a': 600,
'aacute': 600,
'acircumflex': 600,
'acute': 600,
'adieresis': 600,
'ae': 600,
'agrave': 600,
'ampersand': 600,
'aring': 600,
'asciicircum': 600,
'asciitilde': 600,
'asterisk': 600,
'at': 600,
'atilde': 600,
'b': 600,
'backslash': 600,
'bar': 600,
'braceleft': 600,
'braceright': 600,
'bracketleft': 600,
'bracketright': 600,
'breve': 600,
'brokenbar': 600,
'bullet': 600,
'c': 600,
'caron': 600,
'ccedilla': 600,
'cedilla': 600,
'cent': 600,
'circumflex': 600,
'colon': 600,
'comma': 600,
'copyright': 600,
'currency': 600,
'd': 600,
'dagger': 600,
'daggerdbl': 600,
'degree': 600,
'dieresis': 600,
'divide': 600,
'dollar': 600,
'dotaccent': 600,
'dotlessi': 600,
'e': 600,
'eacute': 600,
'ecircumflex': 600,
'edieresis': 600,
'egrave': 600,
'eight': 600,
'ellipsis': 600,
'emdash': 600,
'endash': 600,
'equal': 600,
'eth': 600,
'exclam': 600,
'exclamdown': 600,
'f': 600,
'fi': 600,
'five': 600,
'fl': 600,
'florin': 600,
'four': 600,
'fraction': 600,
'g': 600,
'germandbls': 600,
'grave': 600,
'greater': 600,
'guillemotleft': 600,
'guillemotright': 600,
'guilsinglleft': 600,
'guilsinglright': 600,
'h': 600,
'hungarumlaut': 600,
'hyphen': 600,
'i': 600,
'iacute': 600,
'icircumflex': 600,
'idieresis': 600,
'igrave': 600,
'j': 600,
'k': 600,
'l': 600,
'less': 600,
'logicalnot': 600,
'lslash': 600,
'm': 600,
'macron': 600,
'minus': 600,
'mu': 600,
'multiply': 600,
'n': 600,
'nine': 600,
'ntilde': 600,
'numbersign': 600,
'o': 600,
'oacute': 600,
'ocircumflex': 600,
'odieresis': 600,
'oe': 600,
'ogonek': 600,
'ograve': 600,
'one': 600,
'onehalf': 600,
'onequarter': 600,
'onesuperior': 600,
'ordfeminine': 600,
'ordmasculine': 600,
'oslash': 600,
'otilde': 600,
'p': 600,
'paragraph': 600,
'parenleft': 600,
'parenright': 600,
'percent': 600,
'period': 600,
'periodcentered': 600,
'perthousand': 600,
'plus': 600,
'plusminus': 600,
'q': 600,
'question': 600,
'questiondown': 600,
'quotedbl': 600,
'quotedblbase': 600,
'quotedblleft': 600,
'quotedblright': 600,
'quoteleft': 600,
'quoteright': 600,
'quotesinglbase': 600,
'quotesingle': 600,
'r': 600,
'registered': 600,
'ring': 600,
's': 600,
'scaron': 600,
'section': 600,
'semicolon': 600,
'seven': 600,
'six': 600,
'slash': 600,
'space': 600,
'sterling': 600,
't': 600,
'thorn': 600,
'three': 600,
'threequarters': 600,
'threesuperior': 600,
'tilde': 600,
'trademark': 600,
'two': 600,
'twosuperior': 600,
'u': 600,
'uacute': 600,
'ucircumflex': 600,
'udieresis': 600,
'ugrave': 600,
'underscore': 600,
'v': 600,
'w': 600,
'x': 600,
'y': 600,
'yacute': 600,
'ydieresis': 600,
'yen': 600,
'z': 600,
'zcaron': 600,
'zero': 600}
| mit |
simleo/pydoop-features | pyfeatures/app/dump.py | 1 | 3709 | # BEGIN_COPYRIGHT
#
# Copyright (C) 2014-2017 Open Microscopy Environment:
# - University of Dundee
# - CRS4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
"""\
Dump the contents of an Avro container to a different format.
WARNING: the 'pickle' and 'json' formats read the whole Avro container
into memory in order to dump it as a single list, so they're **not**
suitable for very large files.
"""
import cPickle
import json
import os
import pprint
import shelve
import warnings
from contextlib import closing
try:
from pyavroc import AvroFileReader
except ImportError:
from pyfeatures.pyavroc_emu import AvroFileReader
warnings.warn("pyavroc not found, using standard avro lib\n")
# Output formats understood by the 'dump' sub-command / Writer class.
FORMATS = "db", "pickle", "txt", "json"
# Binary pickle protocol used for both the shelf and pickle outputs.
PROTOCOL = cPickle.HIGHEST_PROTOCOL
def iter_records(f, logger, num_records=None):
    """Yield up to *num_records* records from the open Avro container *f*.

    f: file object positioned at the start of an Avro container.
    logger: logger used for per-record debug output.
    num_records: stop after this many records (None means all of them).
    """
    reader = AvroFileReader(f)
    for i, r in enumerate(reader):
        logger.debug("record #%d", i)
        if num_records is not None and i >= num_records:
            # BUG FIX: 'raise StopIteration' inside a generator becomes a
            # RuntimeError under PEP 479 (Python 3.7+); a plain return ends
            # the generator cleanly with identical semantics.
            return
        yield r
class Writer(object):
    """Serialize an iterable of records to one of the supported FORMATS.

    Dispatches write() to the matching _write_<fmt> method. The 'pickle'
    and 'json' writers materialize the whole record stream in memory (see
    the module docstring's warning).
    """

    def __init__(self, fmt, out_fn):
        if fmt not in FORMATS:
            raise ValueError("Unknown output format: %r" % (fmt,))
        self.fmt = fmt  # one of FORMATS
        self.out_fn = out_fn  # output file name

    def write(self, records):
        """Write *records* using the configured format."""
        getattr(self, "_write_%s" % self.fmt)(records)

    def _write_db(self, records):
        # Store each record under its stringified index in a shelf.
        try:
            os.remove(self.out_fn)  # shelve.open does not overwrite
        except OSError:
            pass
        with closing(
            shelve.open(self.out_fn, flag="n", protocol=PROTOCOL)
        ) as shelf:
            for i, r in enumerate(records):
                shelf[str(i)] = r

    def _write_pickle(self, records):
        # BUG FIX: PROTOCOL is a binary pickle protocol, so the file must be
        # opened in binary mode; text mode ("w") corrupts the stream on
        # platforms that translate line endings (e.g. Windows).
        with open(self.out_fn, "wb") as fo:
            cPickle.dump(list(records), fo, PROTOCOL)

    def _write_txt(self, records):
        # One pretty-printed record per pprint call, streamed to the file.
        with open(self.out_fn, "w") as fo:
            pp = pprint.PrettyPrinter(stream=fo)
            for r in records:
                pp.pprint(r)

    def _write_json(self, records):
        # Whole stream is materialized; keys sorted for stable output.
        with open(self.out_fn, "w") as fo:
            json.dump(list(records), fo,
                      sort_keys=True, indent=1, separators=(',', ': '))
def add_parser(subparsers):
    """Register the 'dump' sub-command and its arguments on *subparsers*."""
    format_metavar = "|".join(FORMATS)
    parser = subparsers.add_parser("dump", description=__doc__)
    parser.add_argument("in_fn", metavar="FILE", help="Avro container file")
    parser.add_argument(
        '-n', '--num-records', type=int, metavar='INT',
        help="number of records to output (default: all)",
    )
    parser.add_argument('-o', '--out-fn', metavar='FILE', help="output file")
    parser.add_argument(
        '-f', '--format', choices=FORMATS, default="txt",
        metavar=format_metavar, help="output format",
    )
    parser.set_defaults(func=run)
    return parser
def run(logger, args, extra_argv=None):
    """Entry point for the 'dump' sub-command."""
    if not args.out_fn:
        # Default output name: <input basename>.<format>
        stem = os.path.splitext(os.path.basename(args.in_fn))[0]
        args.out_fn = "%s.%s" % (stem, args.format)
    logger.info("writing to %s", args.out_fn)
    writer = Writer(args.format, args.out_fn)
    with open(args.in_fn) as f:
        writer.write(iter_records(f, logger, num_records=args.num_records))
| apache-2.0 |
asposepdf/Aspose_Pdf_Cloud | Examples/Python/Examples/GetFormFieldCount.py | 2 | 1224 | import asposepdfcloud
from asposepdfcloud.PdfApi import PdfApi
from asposepdfcloud.PdfApi import ApiException
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
from asposestoragecloud.StorageApi import ResponseMessage
# Example script: upload a PDF to Aspose Cloud storage and count its
# form fields. (Python 2 syntax: print statements, SDK-specific APIs.)
apiKey = "XXXXX" #specify App Key
appSid = "XXXXX" #specify App SID
apiServer = "http://api.aspose.com/v1.1"
data_folder = "../../data/"
#Instantiate Aspose Storage API SDK
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True)
storageApi = StorageApi(storage_apiClient)
#Instantiate Aspose Pdf API SDK
api_client = asposepdfcloud.ApiClient.ApiClient(apiKey, appSid, True)
pdfApi = PdfApi(api_client);
#set input file name
name = "sample-field.pdf"
try:
    #upload file to aspose cloud storage
    response = storageApi.PutCreate(name, data_folder + name)
    #invoke Aspose.Pdf Cloud SDK API to get all of the form fields from the PDF document
    response = pdfApi.GetFields(name)
    if response.Status == "OK":
        # Fields.List holds one entry per form field in the document.
        count = len(response.Fields.List)
        print "Count :: " + str(count)
except ApiException as ex:
    # SDK communicates HTTP/API failures via ApiException.
    print "ApiException:"
    print "Code:" + str(ex.code)
    print "Message:" + ex.message
| mit |
bbayles/vod_metadata | vod_metadata/md_gen.py | 1 | 8226 | import datetime
import os
import random
from vod_metadata import default_template_path
from vod_metadata.vodpackage import VodPackage
# Public API of this module.
__all__ = ["generate_metadata"]
# Extensions tried when looking for poster / box cover image files.
IMAGE_EXTENSIONS = [".bmp", ".jpg"]
def _check_for_ae(ae_type, movie_name, extensions):
ae_safe = ae_type.replace(' ', '_')
ae_paths = ['{}_{}{}'.format(movie_name, ae_safe, x) for x in extensions]
for ae_path in ae_paths:
if os.path.exists(ae_path):
return ae_path
return None
def _set_ae(vod_package, movie_name, ae_type, extensions):
ae_safe = ae_type.replace(' ', '_')
has_ae = getattr(vod_package, 'has_{}'.format(ae_safe))
ae_path = _check_for_ae(ae_type, movie_name, extensions)
if has_ae:
if ae_path is None:
getattr(vod_package, 'remove_{}'.format(ae_safe))()
has_ae = False
else:
vod_package.D_content[ae_type] = ae_path
return has_ae
def generate_metadata(
    file_path, vod_config, template_path=default_template_path, timestamp=None
):
    """Build a VodPackage (CableLabs VOD 1.1 metadata) for a movie file.

    file_path: path to the movie file the metadata describes.
    vod_config: configuration supplying provider/product/prefix values.
    template_path: XML template used to seed the package.
    timestamp: datetime used for dates and asset ids (defaults to now).

    Returns the populated VodPackage instance (not yet written to disk).
    """
    # Time-sensitive values
    timestamp = datetime.datetime.today() if timestamp is None else timestamp
    creation_date = timestamp.strftime("%Y-%m-%d")
    # Licensing window runs 999 days from creation.
    end_date = (timestamp + datetime.timedelta(days=999)).strftime("%Y-%m-%d")
    asset_id = timestamp.strftime("%Y%m%d%H%M")

    # Randomly-generated values (4-digit, zero-padded suffix keeps ids
    # unique for assets generated within the same minute)
    suffix = format(random.randint(0, 9999), "04")
    title_billing_id = "{}B".format(suffix)

    # Start with a minimal metadata template
    vod_package = VodPackage(template_path, vod_config=vod_config)
    file_name = os.path.splitext(os.path.split(file_path)[1])[0]
    short_name = file_name[:20]
    outfile_path = "{}_{}.xml".format(file_name, suffix)
    vod_package.xml_path = os.path.join(os.getcwd(), outfile_path)

    # File-specific values: looks for a preview of the same type as the movie,
    # and a poster / box cover with a suitable extension.
    movie_name, movie_ext = os.path.splitext(file_path)
    vod_package.D_content["movie"] = file_path
    has_preview = _set_ae(vod_package, movie_name, "preview", [movie_ext])
    has_poster = _set_ae(vod_package, movie_name, "poster", IMAGE_EXTENSIONS)
    has_box_cover = _set_ae(
        vod_package, movie_name, "box cover", IMAGE_EXTENSIONS
    )
    vod_package.check_files()

    # Package section
    package_asset_name = "{} {} (package)".format(short_name, suffix)
    package_description = "{} {} (package asset)".format(short_name, suffix)
    package_asset_id = "{}P{}{}".format(vod_config.prefix, asset_id, suffix)
    vod_package.D_ams["package"].update(
        {
            "Provider": vod_config.provider,
            "Product": vod_config.product,
            "Asset_Name": package_asset_name,
            "Version_Major": '1',
            "Version_Minor": '0',
            "Description": package_description,
            "Creation_Date": creation_date,
            "Provider_ID": vod_config.provider_id,
            "Asset_ID": package_asset_id,
            "Asset_Class": "package"
        }
    )
    vod_package.D_app["package"].update(
        {"Metadata_Spec_Version": "CableLabsVOD1.1"}
    )

    # Title section
    title_asset_name = "{} {} (title)".format(short_name, suffix)
    title_description = "{} {} (title asset)".format(short_name, suffix)
    title_asset_id = "{}T{}{}".format(vod_config.prefix, asset_id, suffix)
    # Title_Brief is limited to a shorter length than Title.
    title_title_brief = "{} {}".format(file_name[:14], suffix)
    title_title = "{} {}".format(file_name[:124], suffix)
    vod_package.D_ams["title"].update(
        {
            "Provider": vod_config.provider,
            "Product": vod_config.product,
            "Asset_Name": title_asset_name,
            "Version_Major": '1',
            "Version_Minor": '0',
            "Description": title_description,
            "Creation_Date": creation_date,
            "Provider_ID": vod_config.provider_id,
            "Asset_ID": title_asset_id,
            "Asset_Class": "title"
        }
    )
    vod_package.D_app["title"].update(
        {
            "Type": "title",
            "Title_Brief": title_title_brief,
            "Title": title_title,
            "Summary_Short": title_title,
            "Rating": ["NR"],
            "Closed_Captioning": 'N',
            "Year": timestamp.strftime("%Y"),
            "Category": [vod_config.title_category],
            "Genre": ["Other"],
            "Show_Type": "Other",
            "Billing_ID": title_billing_id,
            "Licensing_Window_Start": creation_date,
            "Licensing_Window_End": end_date,
            "Preview_Period": "300",
            "Provider_QA_Contact": "N/A"
        }
    )

    # Movie section
    movie_asset_name = "{} {} (movie)".format(short_name, suffix)
    movie_description = "{} {} (movie asset)".format(short_name, suffix)
    movie_asset_id = "{}M{}{}".format(vod_config.prefix, asset_id, suffix)
    vod_package.D_ams["movie"].update(
        {
            "Provider": vod_config.provider,
            "Product": vod_config.product,
            "Asset_Name": movie_asset_name,
            "Version_Major": '1',
            "Version_Minor": '0',
            "Description": movie_description,
            "Creation_Date": creation_date,
            "Provider_ID": vod_config.provider_id,
            "Asset_ID": movie_asset_id,
            "Asset_Class": "movie"
        }
    )
    vod_package.D_app["movie"].update({"Type": "movie"})

    # Preview section (only when a matching preview file was found)
    if has_preview:
        preview_asset_name = "{} {} (preview)".format(short_name, suffix)
        preview_description = "{} {} (preview asset)".format(
            short_name, suffix
        )
        preview_asset_id = "{}R{}{}".format(
            vod_config.prefix, asset_id, suffix
        )
        vod_package.D_ams["preview"].update(
            {
                "Provider": vod_config.provider,
                "Product": vod_config.product,
                "Asset_Name": preview_asset_name,
                "Version_Major": '1',
                "Version_Minor": '0',
                "Description": preview_description,
                "Creation_Date": creation_date,
                "Provider_ID": vod_config.provider_id,
                "Asset_ID": preview_asset_id,
                "Asset_Class": "preview"
            }
        )
        vod_package.D_app["preview"].update(
            {"Type": "preview", "Rating": ["NR"]}
        )

    # Poster section (only when a matching image file was found)
    if has_poster:
        poster_asset_name = "{} {} (poster)".format(short_name, suffix)
        poster_description = "{} {} (poster asset)".format(short_name, suffix)
        poster_asset_id = "{}I{}{}".format(vod_config.prefix, asset_id, suffix)
        vod_package.D_ams["poster"].update(
            {
                "Provider": vod_config.provider,
                "Product": vod_config.product,
                "Asset_Name": poster_asset_name,
                "Version_Major": '1',
                "Version_Minor": '0',
                "Description": poster_description,
                "Creation_Date": creation_date,
                "Provider_ID": vod_config.provider_id,
                "Asset_ID": poster_asset_id,
                "Asset_Class": "poster"
            }
        )
        vod_package.D_app["poster"].update({"Type": "poster"})

    # Box cover section (only when a matching image file was found)
    if has_box_cover:
        box_cover_asset_name = "{} {} (box cover)".format(short_name, suffix)
        box_cover_description = "{} {} (box cover asset)".format(
            short_name, suffix
        )
        box_cover_asset_id = "{}B{}{}".format(
            vod_config.prefix, asset_id, suffix
        )
        vod_package.D_ams["box cover"].update(
            {
                "Provider": vod_config.provider,
                "Product": vod_config.product,
                "Asset_Name": box_cover_asset_name,
                "Version_Major": '1',
                "Version_Minor": '0',
                "Description": box_cover_description,
                "Creation_Date": creation_date,
                "Provider_ID": vod_config.provider_id,
                "Asset_ID": box_cover_asset_id,
                "Asset_Class": "box cover"
            }
        )
        vod_package.D_app["box cover"].update({"Type": "box cover"})

    return vod_package
| mit |
lmorchard/django-allauth | allauth/socialaccount/providers/bitbucket/provider.py | 70 | 1098 | from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth.provider import OAuthProvider
class BitbucketAccount(ProviderAccount):
    """Display helpers backed by the Bitbucket account's extra_data."""

    def _extra(self):
        # Shorthand for the raw profile data stored on the account.
        return self.account.extra_data

    def get_username(self):
        return self._extra()['username']

    def get_avatar_url(self):
        return self._extra().get('avatar')

    def get_profile_url(self):
        return 'http://bitbucket.org/' + self._extra()['username']

    def to_str(self):
        return self.get_username()
class BitbucketProvider(OAuthProvider):
    """OAuth provider definition for Bitbucket."""
    id = 'bitbucket'
    name = 'Bitbucket'
    package = 'allauth.socialaccount.providers.bitbucket'
    account_class = BitbucketAccount

    def extract_uid(self, data):
        # The Bitbucket username serves as the stable account identifier.
        return data['username']

    def extract_common_fields(self, data):
        return {
            'email': data.get('email'),
            'username': data.get('username'),
            'first_name': data.get('first_name'),
            'last_name': data.get('last_name'),
        }


providers.registry.register(BitbucketProvider)
| mit |
jamesliu/mxnet | example/gluon/style_transfer/data.py | 48 | 4148 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet.gluon.data as data
from PIL import Image
import os
import os.path
# Image suffixes recognized by the loader (lower- and upper-case variants).
IMG_EXTENSIONS = [
    '.jpg', '.JPG', '.jpeg', '.JPEG',
    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]


def is_image_file(filename):
    """Return True if *filename* ends with a recognized image extension."""
    return filename.endswith(tuple(IMG_EXTENSIONS))
def find_classes(dir):
    """Return (sorted class names, name -> index map) for subdirs of *dir*."""
    classes = sorted(
        entry for entry in os.listdir(dir)
        if os.path.isdir(os.path.join(dir, entry))
    )
    class_to_idx = {name: i for i, name in enumerate(classes)}
    return classes, class_to_idx
def make_dataset(dir, class_to_idx):
    """Return a sorted list of (image path, class index) pairs under *dir*."""
    samples = []
    root_dir = os.path.expanduser(dir)
    for target in sorted(os.listdir(root_dir)):
        class_dir = os.path.join(root_dir, target)
        if not os.path.isdir(class_dir):
            continue
        for root, _, fnames in sorted(os.walk(class_dir)):
            for fname in sorted(fnames):
                if is_image_file(fname):
                    # Look up the label per file (matches the original's
                    # behavior for directories missing from class_to_idx
                    # that contain no images).
                    samples.append(
                        (os.path.join(root, fname), class_to_idx[target])
                    )
    return samples
def pil_loader(path):
    """Load *path* as an RGB PIL image.

    The file handle is opened explicitly and closed promptly to avoid a
    ResourceWarning (https://github.com/python-pillow/Pillow/issues/835).
    """
    with open(path, 'rb') as f, Image.open(f) as img:
        return img.convert('RGB')
class ImageFolder(data.Dataset):
    """Dataset over an image directory laid out as root/<class>/<image>. ::

        root/dog/xxx.png
        root/dog/xxy.png
        root/cat/123.png
        root/cat/nsdf3.png

    Args:
        root (string): Root directory path.
        transform (callable, optional): Applied to each loaded PIL image,
            e.g. ``transforms.RandomCrop``.
        target_transform (callable, optional): Applied to each class index.
        loader (callable, optional): Loads an image given its path.

    Attributes:
        classes (list): Class names.
        class_to_idx (dict): Maps class name to class index.
        imgs (list): (image path, class_index) tuples.
    """

    def __init__(self, root, transform=None, target_transform=None,
                 loader=pil_loader):
        classes, class_to_idx = find_classes(root)
        imgs = make_dataset(root, class_to_idx)
        if not imgs:
            message = ("Found 0 images in subfolders of: " + root + "\n"
                       "Supported image extensions are: " +
                       ",".join(IMG_EXTENSIONS))
            raise RuntimeError(message)
        self.root = root
        self.imgs = imgs
        self.classes = classes
        self.class_to_idx = class_to_idx
        self.transform = transform
        self.target_transform = target_transform
        self.loader = loader

    def __getitem__(self, index):
        """Return (image, target) for the sample at *index*.

        Both the image transform and the target transform are applied
        when configured.
        """
        path, target = self.imgs[index]
        img = self.loader(path)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target

    def __len__(self):
        return len(self.imgs)
| apache-2.0 |
rimbalinux/LMD3 | django/utils/datastructures.py | 12 | 16018 | from types import GeneratorType
from django.utils.copycompat import copy, deepcopy
class MergeDict(object):
    """
    Read-only view over several dictionaries, consulted in order.

    Lookups return the value from the first dictionary that contains the
    key; later occurrences of the same key are ignored.
    """
    def __init__(self, *dicts):
        self.dicts = dicts

    def __getitem__(self, key):
        for inner in self.dicts:
            try:
                return inner[key]
            except KeyError:
                pass
        raise KeyError

    def __copy__(self):
        return self.__class__(*self.dicts)

    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default

    def getlist(self, key):
        # Delegates to the first inner dict (e.g. a MultiValueDict)
        # that holds the key.
        for inner in self.dicts:
            if key in inner.keys():
                return inner.getlist(key)
        return []

    def iteritems(self):
        seen = set()
        for inner in self.dicts:
            for key, value in inner.iteritems():
                if key not in seen:
                    seen.add(key)
                    yield key, value

    def iterkeys(self):
        for key, value in self.iteritems():
            yield key

    def itervalues(self):
        for key, value in self.iteritems():
            yield value

    def items(self):
        return list(self.iteritems())

    def keys(self):
        return list(self.iterkeys())

    def values(self):
        return list(self.itervalues())

    def has_key(self, key):
        return any(key in inner for inner in self.dicts)

    __contains__ = has_key

    __iter__ = iterkeys

    def copy(self):
        """Returns a copy of this object."""
        return self.__copy__()

    def __str__(self):
        """Render like a plain dict, e.g. "{'key1': 'val1', 'key2': 'val2'}"."""
        return str(dict(self.items()))

    def __repr__(self):
        """Render as MergeDict({...}, {...}) listing the wrapped dicts."""
        dictreprs = ', '.join(repr(inner) for inner in self.dicts)
        return '%s(%s)' % (self.__class__.__name__, dictreprs)
class SortedDict(dict):
    """
    A dictionary that keeps its keys in the order in which they're inserted.
    """
    def __new__(cls, *args, **kwargs):
        instance = super(SortedDict, cls).__new__(cls, *args, **kwargs)
        # keyOrder records insertion order; every mutating method below
        # keeps it in sync with the underlying dict.
        instance.keyOrder = []
        return instance

    def __init__(self, data=None):
        if data is None:
            data = {}
        elif isinstance(data, GeneratorType):
            # Unfortunately we need to be able to read a generator twice. Once
            # to get the data into self with our super().__init__ call and a
            # second time to setup keyOrder correctly
            data = list(data)
        super(SortedDict, self).__init__(data)
        if isinstance(data, dict):
            # NOTE(review): relies on Python 2, where dict.keys() returns a
            # list; on Python 3 this would store a view object.
            self.keyOrder = data.keys()
        else:
            self.keyOrder = []
            seen = set()
            for key, value in data:
                # Only the first occurrence of a key determines its position.
                if key not in seen:
                    self.keyOrder.append(key)
                    seen.add(key)

    def __deepcopy__(self, memo):
        # Rebuild from (key, value) pairs so insertion order is preserved.
        return self.__class__([(key, deepcopy(value, memo))
                               for key, value in self.iteritems()])

    def __setitem__(self, key, value):
        if key not in self:
            self.keyOrder.append(key)
        super(SortedDict, self).__setitem__(key, value)

    def __delitem__(self, key):
        super(SortedDict, self).__delitem__(key)
        self.keyOrder.remove(key)

    def __iter__(self):
        return iter(self.keyOrder)

    def pop(self, k, *args):
        result = super(SortedDict, self).pop(k, *args)
        try:
            self.keyOrder.remove(k)
        except ValueError:
            # Key wasn't in the dictionary in the first place. No problem.
            pass
        return result

    def popitem(self):
        result = super(SortedDict, self).popitem()
        self.keyOrder.remove(result[0])
        return result

    def items(self):
        # Python 2: zip() returns a list of (key, value) pairs in order.
        return zip(self.keyOrder, self.values())

    def iteritems(self):
        for key in self.keyOrder:
            yield key, self[key]

    def keys(self):
        # Return a copy so callers cannot mutate the internal ordering.
        return self.keyOrder[:]

    def iterkeys(self):
        return iter(self.keyOrder)

    def values(self):
        # Python 2: map() returns a list of values in key order.
        return map(self.__getitem__, self.keyOrder)

    def itervalues(self):
        for key in self.keyOrder:
            yield self[key]

    def update(self, dict_):
        # Goes through __setitem__, so new keys are appended to keyOrder.
        for k, v in dict_.iteritems():
            self[k] = v

    def setdefault(self, key, default):
        if key not in self:
            self.keyOrder.append(key)
        return super(SortedDict, self).setdefault(key, default)

    def value_for_index(self, index):
        """Returns the value of the item at the given zero-based index."""
        return self[self.keyOrder[index]]

    def insert(self, index, key, value):
        """Inserts the key, value pair before the item with the given index."""
        if key in self.keyOrder:
            n = self.keyOrder.index(key)
            del self.keyOrder[n]
            if n < index:
                # The key sat before the insertion point; removing it
                # shifted the target position left by one.
                index -= 1
        self.keyOrder.insert(index, key)
        super(SortedDict, self).__setitem__(key, value)

    def copy(self):
        """Returns a copy of this object."""
        # This way of initializing the copy means it works for subclasses, too.
        obj = self.__class__(self)
        obj.keyOrder = self.keyOrder[:]
        return obj

    def __repr__(self):
        """
        Replaces the normal dict.__repr__ with a version that returns the keys
        in their sorted order.
        """
        return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])

    def clear(self):
        super(SortedDict, self).clear()
        self.keyOrder = []
class MultiValueDictKeyError(KeyError):
    # Raised by MultiValueDict.__getitem__ when the requested key is absent.
    pass
class MultiValueDict(dict):
    """
    A subclass of dictionary customized to handle multiple values for the
    same key.

    >>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
    >>> d['name']
    'Simon'
    >>> d.getlist('name')
    ['Adrian', 'Simon']
    >>> d.get('lastname', 'nonexistent')
    'nonexistent'
    >>> d.setlist('lastname', ['Holovaty', 'Willison'])

    This class exists to solve the irritating problem raised by cgi.parse_qs,
    which returns a list for every key, even though most Web forms submit
    single name-value pairs.
    """
    def __init__(self, key_to_list_mapping=()):
        # The underlying dict maps each key to a *list* of values.
        super(MultiValueDict, self).__init__(key_to_list_mapping)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__,
                             super(MultiValueDict, self).__repr__())

    def __getitem__(self, key):
        """
        Returns the last data value for this key, or [] if it's an empty list;
        raises KeyError if not found.
        """
        try:
            list_ = super(MultiValueDict, self).__getitem__(key)
        except KeyError:
            raise MultiValueDictKeyError("Key %r not found in %r" % (key, self))
        try:
            return list_[-1]
        except IndexError:
            return []

    def __setitem__(self, key, value):
        # Plain assignment stores a single-element value list.
        super(MultiValueDict, self).__setitem__(key, [value])

    def __copy__(self):
        # Copy each value list so that mutations on the copy don't leak back.
        return self.__class__([
            (k, v[:])
            for k, v in self.lists()
        ])

    def __deepcopy__(self, memo=None):
        import django.utils.copycompat as copy
        if memo is None:
            memo = {}
        result = self.__class__()
        memo[id(self)] = result
        for key, value in dict.items(self):
            dict.__setitem__(result, copy.deepcopy(key, memo),
                             copy.deepcopy(value, memo))
        return result

    def __getstate__(self):
        obj_dict = self.__dict__.copy()
        obj_dict['_data'] = dict([(k, self.getlist(k)) for k in self])
        return obj_dict

    def __setstate__(self, obj_dict):
        data = obj_dict.pop('_data', {})
        for k, v in data.items():
            self.setlist(k, v)
        self.__dict__.update(obj_dict)

    def get(self, key, default=None):
        """
        Returns the last data value for the passed key. If key doesn't exist
        or value is an empty list, then default is returned.
        """
        try:
            val = self[key]
        except KeyError:
            return default
        if val == []:
            return default
        return val

    def getlist(self, key):
        """
        Returns the list of values for the passed key. If key doesn't exist,
        then an empty list is returned.
        """
        try:
            return super(MultiValueDict, self).__getitem__(key)
        except KeyError:
            return []

    def setlist(self, key, list_):
        super(MultiValueDict, self).__setitem__(key, list_)

    def setdefault(self, key, default=None):
        if key not in self:
            self[key] = default
        return self[key]

    def setlistdefault(self, key, default_list=()):
        if key not in self:
            self.setlist(key, default_list)
        return self.getlist(key)

    def appendlist(self, key, value):
        """Appends an item to the internal list associated with key."""
        self.setlistdefault(key, [])
        super(MultiValueDict, self).__setitem__(key, self.getlist(key) + [value])

    def items(self):
        """
        Returns a list of (key, value) pairs, where value is the last item in
        the list associated with the key.
        """
        return [(key, self[key]) for key in self.keys()]

    def iteritems(self):
        """
        Yields (key, value) pairs, where value is the last item in the list
        associated with the key.
        """
        for key in self.keys():
            yield (key, self[key])

    def lists(self):
        """Returns a list of (key, list) pairs."""
        return super(MultiValueDict, self).items()

    def iterlists(self):
        """Yields (key, list) pairs."""
        return super(MultiValueDict, self).iteritems()

    def values(self):
        """Returns a list of the last value on every key list."""
        return [self[key] for key in self.keys()]

    def itervalues(self):
        """Yield the last value on every key list."""
        # Bug fix: iterating self directly replaces the Python 2 only
        # self.iterkeys() call; the keys produced are identical.
        for key in self:
            yield self[key]

    def copy(self):
        """Returns a shallow copy of this object."""
        # Bug fix: this previously called the unbound name ``copy(self)``,
        # which raises NameError (no module-level ``copy`` callable exists).
        # Delegate to __copy__, which duplicates each value list.
        return self.__copy__()

    def update(self, *args, **kwargs):
        """
        update() extends rather than replaces existing key lists.
        Also accepts keyword args.
        """
        if len(args) > 1:
            raise TypeError("update expected at most 1 arguments, got %d" % len(args))
        if args:
            other_dict = args[0]
            if isinstance(other_dict, MultiValueDict):
                for key, value_list in other_dict.lists():
                    self.setlistdefault(key, []).extend(value_list)
            else:
                try:
                    for key, value in other_dict.items():
                        self.setlistdefault(key, []).append(value)
                except TypeError:
                    raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary")
        # Bug fix: kwargs.iteritems() is Python 2 only; .items() behaves the
        # same on both Python 2 and 3.
        for key, value in kwargs.items():
            self.setlistdefault(key, []).append(value)
class DotExpandedDict(dict):
    """
    A dict constructor that expands dotted keys into nested dictionaries.

    Every dotted segment of a key but the last selects (creating on demand)
    an inner dict; the final segment becomes the key for the value.

    >>> d = DotExpandedDict({'person.1.firstname': ['Simon'], \
            'person.1.lastname': ['Willison'], \
            'person.2.firstname': ['Adrian'], \
            'person.2.lastname': ['Holovaty']})
    >>> d['person']['1']['firstname']
    ['Simon']

    Gotcha: results are unpredictable if the dots are "uneven", e.g. when
    both 'c' and 'c.1' appear as keys.
    """
    def __init__(self, key_to_list_mapping):
        for dotted_key, value in key_to_list_mapping.items():
            target = self
            parts = dotted_key.split('.')
            # Walk (creating as needed) the chain of inner dicts for every
            # segment except the last one.
            for part in parts[:-1]:
                target = target.setdefault(part, {})
            try:
                target[parts[-1]] = value
            except TypeError:
                # The current position holds a non-dict value ("uneven"
                # dotting); keep the original best-effort fallback.
                target = {parts[-1]: value}
class ImmutableList(tuple):
    """
    A tuple-like object that raises a configurable error whenever a
    list-style mutation is attempted.

    >>> a = ImmutableList(range(5), warning="You cannot mutate this.")
    >>> a[3] = '4'
    Traceback (most recent call last):
        ...
    AttributeError: You cannot mutate this.
    """
    def __new__(cls, *args, **kwargs):
        # 'warning' is consumed here so it is not forwarded to tuple.__new__.
        warning = kwargs.pop('warning', 'ImmutableList object is immutable.')
        self = tuple.__new__(cls, *args, **kwargs)
        self.warning = warning
        return self

    def complain(self, *wargs, **kwargs):
        """Raise the configured warning (an exception instance or message)."""
        if isinstance(self.warning, Exception):
            raise self.warning
        raise AttributeError(self.warning)

    # Every mutating operation is routed to complain().
    __delitem__ = complain
    __delslice__ = complain
    __iadd__ = complain
    __imul__ = complain
    __setitem__ = complain
    __setslice__ = complain
    append = complain
    extend = complain
    insert = complain
    pop = complain
    remove = complain
    sort = complain
    reverse = complain
class DictWrapper(dict):
    """
    A dict whose lookups for keys starting with *prefix* strip the prefix
    and pass the stored value through *func* before returning it.

    Used by the SQL construction code to ensure that values are correctly
    quoted before being used.
    """
    def __init__(self, data, func, prefix):
        super(DictWrapper, self).__init__(data)
        self.func = func
        self.prefix = prefix

    def __getitem__(self, key):
        """
        Look up *key*; when it carries the prefix, strip it and transform
        the stored value with self.func, otherwise return the raw value.
        """
        prefixed = key.startswith(self.prefix)
        if prefixed:
            key = key[len(self.prefix):]
        raw = super(DictWrapper, self).__getitem__(key)
        return self.func(raw) if prefixed else raw
| bsd-3-clause |
surgebiswas/poker | PokerBots_2017/Johnny/theano/gof/graph.py | 3 | 43844 | """
Node classes (`Apply`, `Variable`) and expression graph algorithms.
To read about what theano graphs are from a user perspective, have a look at
`graph.html <../doc/graph.html>`__.
"""
from __future__ import print_function
from collections import deque
from copy import copy
from itertools import count
import theano
from theano import config
from theano.gof import utils
from six import string_types, integer_types, iteritems
from theano.misc.ordered_set import OrderedSet
__docformat__ = "restructuredtext en"
# Lazy imports to avoid circular dependencies.
is_same_graph_with_merge = None
equal_computations = None
NoParams = object()
class Node(utils.object2):
    """
    A Node in a theano graph.

    Graphs contain two kinds of Nodes -- Variable and Apply.
    Edges in the graph are not explicitly represented.
    Instead each Node keeps track of its parents via
    Variable.owner / Apply.inputs and its children
    via Variable.clients / Apply.outputs.
    """
    def get_parents(self):
        """
        Return a list of the parents of this node.

        Should return a copy--i.e., modifying the return
        value should not modify the graph structure.
        """
        # Abstract: Variable and Apply each provide their own implementation.
        raise NotImplementedError()
class Apply(Node):
    """
    An :term:`Apply` instance is a node in an expression graph which represents
    the application of an `Op` to some input `Variable` nodes, producing some
    output `Variable` nodes.

    This class is typically instantiated by an Op's make_node() function, which
    is typically called by that Op's __call__() function.

    An Apply instance serves as a simple structure with three important
    attributes:

    - :literal:`inputs` : a list of `Variable` nodes that represent the
      arguments of the expression,
    - :literal:`outputs` : a list of `Variable` nodes that represent the
      variable of the expression, and
    - :literal:`op` : an `Op` instance that determines the nature of the
      expression being applied.

    The driver `compile.function` uses Apply's inputs attribute together with
    Variable's owner attribute to search the expression graph and determine
    which inputs are necessary to compute the function's outputs.

    A `Linker` uses the Apply instance's `op` field to compute the variables.

    Comparing with the Python language, an `Apply` instance is theano's version
    of a function call (or expression instance) whereas `Op` is theano's version
    of a function definition.

    Parameters
    ----------
    op : `Op` instance
    inputs : list of Variable instances
    outputs : list of Variable instances

    Notes
    -----
    The owner field of each output in the outputs list will be set to self.
    If an output element has an owner that is neither None nor self, then a
    ValueError exception will be raised.
    """

    def __init__(self, op, inputs, outputs):
        self.op = op
        self.inputs = []
        self.tag = utils.scratchpad()
        if not isinstance(inputs, (list, tuple)):
            raise TypeError("The inputs of an Apply must be a list or tuple")
        if not isinstance(outputs, (list, tuple)):
            raise TypeError("The output of an Apply must be a list or tuple")
        # filter inputs to make sure each element is a Variable
        for input in inputs:
            if isinstance(input, Variable):
                self.inputs.append(input)
            else:
                raise TypeError("The 'inputs' argument to Apply must contain Variable instances, not %s" % input)
        self.outputs = []
        # filter outputs to make sure each element is a Variable
        for i, output in enumerate(outputs):
            if isinstance(output, Variable):
                if output.owner is None:
                    # Claim ownership: this Apply is the node that computes
                    # the output, at position i.
                    output.owner = self
                    output.index = i
                elif output.owner is not self or output.index != i:
                    raise ValueError("All output variables passed to Apply must belong to it.")
                self.outputs.append(output)
            else:
                raise TypeError("The 'outputs' argument to Apply must contain Variable instances with no owner, not %s" % output)

    def run_params(self):
        """
        Returns the params for the node, or NoParams if no params is set.
        """
        if hasattr(self.op, 'get_params'):
            return self.op.get_params(self)
        return NoParams

    def __getstate__(self):
        # Pickling support: strip the tag's ufunc, if any, since ufuncs do
        # not pickle; everything else is returned as-is.
        d = self.__dict__
        # ufunc don't pickle/unpickle well
        if hasattr(self.tag, 'ufunc'):
            d = copy(self.__dict__)
            t = d["tag"]
            del t.ufunc
            d["tag"] = t
        return d

    def default_output(self):
        """
        Returns the default output for this node.

        Returns
        -------
        Variable instance
            An element of self.outputs, typically self.outputs[0].

        Notes
        -----
        May raise AttributeError self.op.default_output is out of range, or if
        there are multiple outputs and self.op.default_output does not exist.
        """
        do = getattr(self.op, 'default_output', None)
        if do is None:
            if len(self.outputs) == 1:
                return self.outputs[0]
            else:
                # Ambiguous: multiple outputs but no declared default.
                raise AttributeError(
                    "%s.default_output should be an output index." % self.op)
        elif not isinstance(do, integer_types):
            raise AttributeError("%s.default_output should be an int or long" %
                                 self.op)
        elif do < 0 or do >= len(self.outputs):
            raise AttributeError("%s.default_output is out of range." %
                                 self.op)
        return self.outputs[do]

    out = property(default_output,
                   doc="alias for self.default_output()")
    """
    Alias for self.default_output().
    """

    def __str__(self):
        return op_as_string(self.inputs, self)

    def __repr__(self):
        return str(self)

    def __asapply__(self):
        return self

    def clone(self):
        """
        Duplicate this Apply instance with inputs = self.inputs.

        Returns
        -------
        object
            A new Apply instance (or subclass instance) with new outputs.

        Notes
        -----
        Tags are copied from self to the returned instance.
        """
        cp = self.__class__(self.op, self.inputs,
                            [output.clone() for output in self.outputs])
        cp.tag = copy(self.tag)
        return cp

    def clone_with_new_inputs(self, inputs, strict=True):
        """
        Duplicate this Apply instance in a new graph.

        Parameters
        ----------
        inputs
            List of Variable instances to use as inputs.
        strict : bool
            If True, the type fields of all the inputs must be equal
            to the current ones (or compatible, for instance Tensor /
            CudaNdarray of the same dtype and broadcastable patterns,
            in which case they will be converted into current Type), and
            returned outputs are guaranteed to have the same types as
            self.outputs. If False, then there's no guarantee that the
            clone's outputs will have the same types as self.outputs,
            and cloning may not even be possible (it depends on the Op).

        Returns
        -------
        object
            An Apply instance with the same op but different outputs.
        """
        assert isinstance(inputs, (list, tuple))
        remake_node = False
        new_inputs = inputs[:]
        for i, (curr, new) in enumerate(zip(self.inputs, new_inputs)):
            if not curr.type == new.type:
                if strict:
                    # If compatible, casts new into curr.type
                    new_inputs[i] = curr.type.filter_variable(new)
                else:
                    remake_node = True
        if remake_node:
            # Types changed: let the Op rebuild the node so output types
            # are recomputed.
            new_node = self.op.make_node(*new_inputs)
            new_node.tag = copy(self.tag).__update__(new_node.tag)
        else:
            new_node = self.clone()
            new_node.inputs = new_inputs
        return new_node

    def get_parents(self):
        return list(self.inputs)

    # convenience properties
    nin = property(lambda self: len(self.inputs), doc='same as len(self.inputs)')
    """
    Property: Number of inputs.
    """
    nout = property(lambda self: len(self.outputs), doc='same as len(self.outputs)')
    """
    Property: Number of outputs.
    """
    params_type = property(lambda self: self.op.params_type, doc='type to use for the params')
class Variable(Node):
    """
    A :term:`Variable` is a node in an expression graph that represents a
    variable.

    The inputs and outputs of every `Apply` (theano.gof.Apply) are `Variable`
    instances. The input and output arguments to create a `function` are also
    `Variable` instances. A `Variable` is like a strongly-typed variable in
    some other languages; each `Variable` contains a reference to a `Type`
    instance that defines the kind of value the `Variable` can take in a
    computation.

    A `Variable` is a container for four important attributes:

    - :literal:`type` a `Type` instance defining the kind of value this
      `Variable` can have,
    - :literal:`owner` either None (for graph roots) or the `Apply` instance
      of which `self` is an output,
    - :literal:`index` the integer such that :literal:`owner.outputs[index] is
      this_variable` (ignored if `owner` is None),
    - :literal:`name` a string to use in pretty-printing and debugging.

    There are a few kinds of Variables to be aware of: A Variable which is the
    output of a symbolic computation has a reference to the Apply instance to
    which it belongs (property: owner) and the position of itself in the owner's
    output list (property: index).

    - `Variable` (this base type) is typically the output of a symbolic
      computation.
    - `Constant` (a subclass) which adds a default and un-replaceable
      :literal:`value`, and requires that owner is None.
    - `TensorVariable` subclass of Variable that represents a numpy.ndarray
       object.
    - `TensorSharedVariable` Shared version of TensorVariable.
    - `SparseVariable` subclass of Variable that represents
      a scipy.sparse.{csc,csr}_matrix object.
    - `CudaNdarrayVariable` subclass of Variable that represents our object on
      the GPU that is a subset of numpy.ndarray.
    - `RandomVariable`.

    A Variable which is the output of a symbolic computation will have an owner
    not equal to None.

    Using the Variables' owner field and the Apply nodes' inputs fields, one can
    navigate a graph from an output all the way to the inputs. The opposite
    direction is not possible until a FunctionGraph has annotated the Variables
    with the clients field, ie, before the compilation process has begun a
    Variable does not know which Apply nodes take it as input.

    Parameters
    ----------
    type : a Type instance
        The type governs the kind of data that can be associated with this
        variable.
    owner : None or Apply instance
        The Apply instance which computes the value for this variable.
    index : None or int
        The position of this Variable in owner.outputs.
    name : None or str
        A string for pretty-printing and debugging.

    Examples
    --------

    .. code-block:: python

        import theano
        from theano import tensor
        a = tensor.constant(1.5)  # declare a symbolic constant
        b = tensor.fscalar()  # declare a symbolic floating-point scalar
        c = a + b  # create a simple expression
        f = theano.function([b], [c])  # this works because a has a value associated with it already
        assert 4.0 == f(2.5)  # bind 2.5 to an internal copy of b and evaluate an internal c
        theano.function([a], [c])  # compilation error because b (required by c) is undefined
        theano.function([a,b], [c])  # compilation error because a is constant, it can't be an input
        d = tensor.value(1.5)  # create a value similar to the constant 'a'
        e = d + b
        theano.function([d,b], [e])  # this works. d's default value of 1.5 is ignored.

    The python variables :literal:`a,b,c` all refer to instances of type
    `Variable`. The `Variable` refered to by `a` is also an instance of
    `Constant`.

    `compile.function` uses each `Apply` instance's `inputs` attribute together
    with each Variable's `owner` field to determine which inputs are necessary
    to compute the function's outputs.
    """
    # __slots__ = ['type', 'owner', 'index', 'name']

    # Class-level counter used to generate a unique auto_name per instance.
    __count__ = count(0)

    def __init__(self, type, owner=None, index=None, name=None):
        super(Variable, self).__init__()
        self.tag = utils.scratchpad()
        self.type = type
        if owner is not None and not isinstance(owner, Apply):
            raise TypeError("owner must be an Apply instance", owner)
        self.owner = owner
        if index is not None and not isinstance(index, integer_types):
            raise TypeError("index must be an int", index)
        self.index = index
        if name is not None and not isinstance(name, string_types):
            raise TypeError("name must be a string", name)
        self.name = name
        self.auto_name = 'auto_' + str(next(self.__count__))

    def __str__(self):
        """Return a str representation of the Variable.
        """
        if self.name is not None:
            return self.name
        if self.owner is not None:
            op = self.owner.op
            if self.index == op.default_output:
                return str(self.owner.op) + ".out"
            else:
                return str(self.owner.op) + "." + str(self.index)
        else:
            return "<%s>" % str(self.type)

    def __repr_test_value__(self):
        """Return a repr of the test value.

        Return a printable representation of the test value. It can be
        overridden by classes with non printable test_value to provide a
        suitable representation of the test_value.
        """
        return repr(theano.gof.op.get_test_value(self))

    def __repr__(self, firstPass=True):
        """Return a repr of the Variable.

        Return a printable name or description of the Variable. If
        config.print_test_value is True it will also print the test_value if
        any.
        """
        to_print = [str(self)]
        if config.print_test_value and firstPass:
            try:
                to_print.append(self.__repr_test_value__())
            except AttributeError:
                # No test value attached to this Variable; print the name only.
                pass
        return '\n'.join(to_print)

    def clone(self):
        """
        Return a new Variable like self.

        Returns
        -------
        Variable instance
            A new Variable instance (or subclass instance) with no owner or
            index.

        Notes
        -----
        Tags are copied to the returned instance.

        Name is copied to the returned instance.
        """
        # return copy(self)
        cp = self.__class__(self.type, None, None, self.name)
        cp.tag = copy(self.tag)
        return cp

    def __lt__(self, other):
        raise NotImplementedError('Subclasses of Variable must provide __lt__',
                                  self.__class__.__name__)

    def __le__(self, other):
        raise NotImplementedError('Subclasses of Variable must provide __le__',
                                  self.__class__.__name__)

    def __gt__(self, other):
        raise NotImplementedError('Subclasses of Variable must provide __gt__',
                                  self.__class__.__name__)

    def __ge__(self, other):
        raise NotImplementedError('Subclasses of Variable must provide __ge__',
                                  self.__class__.__name__)

    def get_parents(self):
        if self.owner is not None:
            return [self.owner]
        return []

    def eval(self, inputs_to_values=None):
        """
        Evaluates this variable.

        Parameters
        ----------
        inputs_to_values
            A dictionary mapping theano Variables to values.

        Examples
        --------

        >>> import numpy
        >>> import theano.tensor as T
        >>> x = T.dscalar('x')
        >>> y = T.dscalar('y')
        >>> z = x + y
        >>> numpy.allclose(z.eval({x : 16.3, y : 12.1}), 28.4)
        True

        We passed :func:`eval` a dictionary mapping symbolic theano
        variables to the values to substitute for them, and it returned
        the numerical value of the expression.

        Notes
        -----
        `eval` will be slow the first time you call it on a variable --
        it needs to call :func:`function` to compile the expression behind
        the scenes. Subsequent calls to :func:`eval` on that same variable
        will be fast, because the variable caches the compiled function.

        This way of computing has more overhead than a normal Theano
        function, so don't use it too much in real scripts.
        """
        if inputs_to_values is None:
            inputs_to_values = {}
        if not hasattr(self, '_fn_cache'):
            self._fn_cache = dict()
        # Cache one compiled function per distinct set of input Variables.
        inputs = tuple(sorted(inputs_to_values.keys(), key=id))
        if inputs not in self._fn_cache:
            self._fn_cache[inputs] = theano.function(inputs, self)
        args = [inputs_to_values[param] for param in inputs]
        rval = self._fn_cache[inputs](*args)
        return rval

    def __getstate__(self):
        d = self.__dict__.copy()
        # Compiled functions do not pickle; drop the eval() cache.
        d.pop("_fn_cache", None)
        return d
class Constant(Variable):
    """
    A :term:`Constant` is a `Variable` with a `value` field that cannot be
    changed at runtime.

    Constant nodes make eligible numerous optimizations: constant inlining in
    C code, constant folding, etc.

    Notes
    -----
    The data field is filtered by what is provided in the constructor for the
    Constant's type field.
    """
    # __slots__ = ['data']
    def __init__(self, type, data, name=None):
        Variable.__init__(self, type, None, None, name)
        # The type validates/coerces the constant's data once, at build time.
        self.data = type.filter(data)

    def equals(self, other):
        # this does what __eq__ should do, but Variable and Apply should always be hashable by id
        return isinstance(other, Constant) and self.signature() == other.signature()

    def signature(self):
        # (type, data) identifies a constant for merge/equality purposes.
        return (self.type, self.data)

    def merge_signature(self):
        return self.signature()

    def __str__(self):
        if self.name is not None:
            return self.name
        else:
            name = str(self.data)
            # Abbreviate long data reprs to keep graph printouts readable.
            if len(name) > 20:
                name = name[:10] + '...' + name[-10:]
            return 'Constant{%s}' % name

    def clone(self):
        """
        We clone this object, but we don't clone the data to lower memory
        requirement. We suppose that the data will never change.
        """
        cp = self.__class__(self.type, self.data, self.name)
        cp.tag = copy(self.tag)
        return cp

    def __set_owner(self, value):
        """
        Setter for the `owner` property; constants are graph roots.

        Raises
        ------
        ValueError
            If `value` is not `None`.
        """
        if value is not None:
            raise ValueError("Constant instances cannot have an owner.")

    owner = property(lambda self: None, __set_owner)
    value = property(lambda self: self.data, doc='read-only data access method')

    # index is not defined, because the `owner` attribute must necessarily be None
def stack_search(start, expand, mode='bfs', build_inv=False):
    """
    Search through a graph, either breadth- or depth-first.

    Parameters
    ----------
    start : deque
        Search from these nodes.
    expand : callable
        When we get to a node, add expand(node) to the list of nodes to visit.
        This function should return a list, or None.
    mode : str
        'bfs' or 'dfs'.
    build_inv : bool
        If True, also return a dict mapping each expanded node to the list
        of nodes it was reached from.

    Returns
    -------
    list of `Variable` or `Apply` instances (depends on `expand`)
        The list of nodes in order of traversal.

    Notes
    -----
    A node will appear at most once in the return value, even if it
    appears multiple times in the start parameter.

    :postcondition: every element of start is transferred to the returned list.
    :postcondition: start is empty.
    """
    if mode not in ('bfs', 'dfs'):
        raise ValueError('mode should be bfs or dfs', mode)
    seen_ids = set()
    order = []
    # BFS consumes from the front of the deque, DFS from the back.
    pop_next = start.popleft if mode == 'bfs' else start.pop
    inverse = {}
    while start:
        node = pop_next()
        if id(node) in seen_ids:
            continue
        order.append(node)
        seen_ids.add(id(node))
        children = expand(node)
        if children:
            if build_inv:
                for child in children:
                    inverse.setdefault(child, []).append(node)
            start.extend(children)
    assert len(order) == len(seen_ids)
    if build_inv:
        return order, inverse
    return order
def ancestors(variable_list, blockers=None):
    """
    Return the variables that contribute to those in variable_list (inclusive).

    Parameters
    ----------
    variable_list : list of `Variable` instances
        Output `Variable` instances from which to search backward through
        owners.
    blockers : optional collection of `Variable` instances
        Variables at which the backward search stops.

    Returns
    -------
    list of `Variable` instances
        All input nodes, in the order found by a left-recursive depth-first
        search started at the nodes in `variable_list`.
    """
    def expand(r):
        # Walk into the owning Apply's inputs unless r is a root or blocked.
        if r.owner and (not blockers or r not in blockers):
            return reversed(r.owner.inputs)
    return stack_search(deque(variable_list), expand, 'dfs')
def inputs(variable_list, blockers=None):
    """
    Return the inputs required to compute the given Variables.

    Parameters
    ----------
    variable_list : list of `Variable` instances
        Output `Variable` instances from which to search backward through
        owners.
    blockers : optional collection of `Variable` instances
        Variables at which the backward search stops.

    Returns
    -------
    list of `Variable` instances
        Input nodes with no owner, in the order found by a left-recursive
        depth-first search started at the nodes in `variable_list`.
    """
    # Inputs are exactly the ownerless ancestors.
    return [r for r in ancestors(variable_list, blockers) if r.owner is None]
def variables_and_orphans(i, o):
    """
    Return (variables, orphans) of the subgraph between inputs ``i`` and
    outputs ``o``: all Variables reached from ``o`` without crossing ``i``,
    and those among them that have no owner and are not in ``i``.
    """
    def expand(r):
        if r.owner and r not in i:
            neighbours = list(r.owner.inputs) + list(r.owner.outputs)
            neighbours.reverse()
            return neighbours
    all_vars = stack_search(deque(o), expand, 'dfs')
    orphan_vars = [r for r in all_vars if r.owner is None and r not in i]
    return all_vars, orphan_vars
def ops(i, o):
    """
    Return the Apply nodes of the subgraph between i and o.

    Parameters
    ----------
    i : list
        Input L{Variable}s.
    o : list
        Output L{Variable}s.

    Returns
    -------
    object
        The set of ops that are contained within the subgraph that lies
        between i and o, including the owners of the L{Variable}s in o and
        intermediary ops between i and o, but not the owners of the
        L{Variable}s in i.
    """
    all_vars, orphan_vars = variables_and_orphans(i, o)
    apply_nodes = set()
    for r in all_vars:
        # Owners of the inputs themselves (and of orphans) are excluded.
        if r not in i and r not in orphan_vars and r.owner is not None:
            apply_nodes.add(r.owner)
    return apply_nodes
def variables(i, o):
    """
    Return the Variables involved in the subgraph between i and o.

    Parameters
    ----------
    i : list
        Input L{Variable}s.
    o : list
        Output L{Variable}s.

    Returns
    -------
    object
        The set of Variables that are involved in the subgraph that lies
        between i and o. This includes i, o, orphans(i, o) and all values of
        all intermediary steps from i to o.
    """
    all_vars, _ = variables_and_orphans(i, o)
    return all_vars
def orphans(i, o):
    """
    Return the orphans of the subgraph between i and o.

    Parameters
    ----------
    i : list
        Input L{Variable}s.
    o : list
        Output L{Variable}s.

    Returns
    -------
    object
        The set of Variables which one or more Variables in o depend on but
        are neither in i nor in the subgraph that lies between i and o.

    Examples
    --------
    orphans([x], [(x+y).out]) => [y]
    """
    _, orphan_vars = variables_and_orphans(i, o)
    return orphan_vars
def clone(i, o, copy_inputs=True):
    """
    Copies the subgraph contained between i and o.

    Parameters
    ----------
    i : list
        Input L{Variable}s.
    o : list
        Output L{Variable}s.
    copy_inputs : bool
        If True, the inputs will be copied (defaults to True).

    Returns
    -------
    object
        The inputs and outputs of that copy.
    """
    equiv = clone_get_equiv(i, o, copy_inputs)
    cloned_inputs = [equiv[inp] for inp in i]
    cloned_outputs = [equiv[out] for out in o]
    return cloned_inputs, cloned_outputs
def clone_get_equiv(inputs, outputs, copy_inputs_and_orphans=True, memo=None):
    """
    Return a dictionary that maps from Variable and Apply nodes in the
    original graph to a new node (a clone) in a new graph.

    This function works by recursively cloning inputs... rebuilding a directed
    graph from the bottom (inputs) up to eventually building new outputs.

    Parameters
    ----------
    inputs : a list of Variables
    outputs : a list of Variables
    copy_inputs_and_orphans : bool
        True means to create the cloned graph from new input and constant
        nodes (the bottom of a feed-upward graph).
        False means to clone a graph that is rooted at the original input
        nodes.
    memo : None or dict
        Optionally start with a partly-filled dictionary for the return value.
        If a dictionary is passed, this function will work in-place on that
        dictionary and return it.
    """
    if memo is None:
        memo = {}

    # Seed the mapping with the (possibly cloned) inputs.
    for inp in inputs:
        if copy_inputs_and_orphans:
            fresh = inp.clone()
            fresh.owner = None
            fresh.index = None
            memo.setdefault(inp, fresh)
        else:
            memo.setdefault(inp, inp)

    # Walk the graph in topological order, cloning each Apply with the
    # already-cloned versions of its inputs.
    for node in io_toposort(inputs, outputs):
        for inp in node.inputs:
            if inp not in memo:
                # Orphans are cloned (or reused) on first encounter.
                memo[inp] = inp.clone() if copy_inputs_and_orphans else inp
        new_node = node.clone_with_new_inputs([memo[x] for x in node.inputs])
        memo.setdefault(node, new_node)
        for old_out, new_out in zip(node.outputs, new_node.outputs):
            memo.setdefault(old_out, new_out)

    # Outputs not reached above (e.g. unowned outputs) still need a clone.
    for out in outputs:
        if out not in memo:
            memo[out] = out.clone()

    return memo
def general_toposort(r_out, deps, debug_print=False,
                     compute_deps_cache=None, deps_cache=None,
                     clients=None):
    """
    Topologically sort all nodes reachable from ``r_out``.

    Parameters
    ----------
    r_out : tuple/list/deque of nodes
        Roots from which the reachable subgraph is explored.
    deps
        A python function that takes a node as input and returns its dependence.
    debug_print : bool
        If True and a cycle is found, print the reachable set and the partial
        ordering before raising.
    compute_deps_cache : optional
        If provided deps_cache should also be provided. This is a function like
        deps, but that also cache its results in a dict passed as deps_cache.
    deps_cache : dict
        Must be used with compute_deps_cache.
    clients : dict
        If a dict is passed it will be filled with a mapping of node
        -> clients for each node in the subgraph.

    Raises
    ------
    ValueError
        If the reachable subgraph contains a cycle.

    Notes
    -----
    deps(i) should behave like a pure function (no funny business with
    internal state).

    deps(i) will be cached by this function (to be fast).

    The order of the return value list is determined by the order of nodes
    returned by the deps() function.

    deps should be provided or can be None and the caller provides
    compute_deps_cache and deps_cache. The second option removes a Python
    function call, and allows for more specialized code, so it can be
    faster.
    """
    if compute_deps_cache is None:
        deps_cache = {}

        # Default caching wrapper around deps(); results are memoized in
        # deps_cache so each node's dependencies are computed once.
        def compute_deps_cache(io):
            if io not in deps_cache:
                d = deps(io)
                if d:
                    if not isinstance(d, (list, OrderedSet)):
                        raise TypeError(
                            "Non-deterministic collections here make"
                            " toposort non-deterministic.")
                    deps_cache[io] = list(d)
                else:
                    deps_cache[io] = d
                return d
            else:
                return deps_cache[io]
    assert deps_cache is not None
    assert isinstance(r_out, (tuple, list, deque))

    # First pass: find every reachable node and the inverse (client) edges.
    reachable, _clients = stack_search(deque(r_out), compute_deps_cache,
                                       'dfs', True)
    if clients is not None:
        clients.update(_clients)
    # Kahn-style sort: start from the nodes with no remaining dependencies.
    sources = deque([r for r in reachable if not deps_cache.get(r, None)])
    rset = set()
    rlist = []
    while sources:
        node = sources.popleft()
        if node not in rset:
            rlist.append(node)
            rset.add(node)
            for client in _clients.get(node, []):
                # Remove the satisfied dependency; once a client has none
                # left, it becomes a source itself.
                deps_cache[client] = [a for a in deps_cache[client]
                                      if a is not node]
                if not deps_cache[client]:
                    sources.append(client)
    if len(rlist) != len(reachable):
        # Some reachable node was never freed of its dependencies: a cycle.
        if debug_print:
            print('')
            print(reachable)
            print(rlist)
        raise ValueError('graph contains cycles')
    return rlist
def io_toposort(inputs, outputs, orderings=None, clients=None):
    """
    Sort the apply nodes of the subgraph between `inputs` and `outputs`
    topologically and return them as a list.

    Parameters
    ----------
    inputs : list or tuple of Variable instances
        Traversal stops at these variables; they are treated as having no
        predecessors.
    outputs : list or tuple of Apply instances
        The graph reachable from these (towards `inputs`) is sorted.
    orderings : dict
        Key: Apply instance. Value: list of Apply instance.
        Extra "must-come-before" constraints in addition to the data
        dependencies. It is important that the value be a container with
        a deterministic iteration order. No sets allowed!
    clients : dict
        If a dict is provided it will be filled with mappings of
        node->clients for each node in the subgraph that is sorted.

    Returns
    -------
    list of Apply
        The apply nodes in dependency order (Variables are filtered out).
    """
    # the inputs are used only here in the function that decides what 'predecessors' to explore
    iset = set(inputs)
    # We build 2 functions as a speed up: when no extra orderings are given,
    # a specialized closure that caches directly into deps_cache is passed to
    # general_toposort (saving one Python call per node); otherwise the
    # generic deps function is used.
    deps_cache = {}
    compute_deps = None
    compute_deps_cache = None
    if not orderings:  # can be None or empty dict
        # Specialized function that is faster when no ordering.
        # Also include the cache in the function itself for speed up.
        def compute_deps_cache(obj):
            if obj in deps_cache:
                return deps_cache[obj]
            rval = []
            if obj not in iset:
                # A Variable depends on its owner apply node; an Apply node
                # depends on its inputs.
                if isinstance(obj, Variable):
                    if obj.owner:
                        rval = [obj.owner]
                elif isinstance(obj, Apply):
                    rval = list(obj.inputs)
                if rval:
                    if not isinstance(rval, (list, OrderedSet)):
                        raise TypeError(
                            "Non-deterministic collections here make"
                            " toposort non-deterministic.")
                    # Store a copy: general_toposort mutates the cached lists
                    # while it consumes dependencies.
                    deps_cache[obj] = list(rval)
                else:
                    deps_cache[obj] = rval
            else:
                deps_cache[obj] = rval
            return rval
    else:
        def compute_deps(obj):
            rval = []
            if obj not in iset:
                if isinstance(obj, Variable):
                    if obj.owner:
                        rval = [obj.owner]
                elif isinstance(obj, Apply):
                    rval = list(obj.inputs)
                rval.extend(orderings.get(obj, []))
            else:
                # Graph inputs must not be constrained by orderings.
                assert not orderings.get(obj, [])
            return rval
    topo = general_toposort(outputs, deps=compute_deps,
                            compute_deps_cache=compute_deps_cache,
                            deps_cache=deps_cache, clients=clients)
    # Only the apply nodes are of interest to callers; drop the Variables.
    return [o for o in topo if isinstance(o, Apply)]
default_leaf_formatter = str
def default_node_formatter(op, argstrings):
    """Render an apply node as ``OpName(arg1, arg2, ...)``."""
    joined_arguments = ", ".join(argstrings)
    return "%s(%s)" % (op.op, joined_arguments)
def io_connection_pattern(inputs, outputs):
    """
    Return the connection pattern of a subgraph defined by given
    inputs and outputs.

    Parameters
    ----------
    inputs : list of Variable
    outputs : list of Variable

    Returns
    -------
    list of list of bool
        ``result[i][j]`` is True iff ``outputs[j]`` is connected to
        (i.e. depends on) ``inputs[i]`` within the subgraph.
    """
    inner_nodes = io_toposort(inputs, outputs)

    # Initialize 'connect_pattern_by_var' by establishing each input as
    # connected only to itself.
    nb_inputs = len(inputs)
    connect_pattern_by_var = {}
    for i, inp_var in enumerate(inputs):
        connect_pattern_by_var[inp_var] = [i == j for j in range(nb_inputs)]

    # Iterate through the nodes used to produce the outputs from the
    # inputs and, for every node, infer their connection pattern to
    # every input from the connection patterns of their parents.
    for n in inner_nodes:

        # Get the connection pattern of the inner node's op. If the op
        # does not define a connection_pattern method, assume that
        # every node output is connected to every node input.
        try:
            op_connection_pattern = n.op.connection_pattern(n)
        except AttributeError:
            op_connection_pattern = ([[True] * len(n.outputs)] *
                                     len(n.inputs))

        # For every output of the inner node, figure out which inputs it
        # is connected to by combining the connection pattern of the inner
        # node and the connection patterns of the inner node's inputs.
        for out_idx, out in enumerate(n.outputs):
            out_connection_pattern = [False] * nb_inputs

            for inp_idx, inp in enumerate(n.inputs):
                if inp in connect_pattern_by_var:
                    inp_connection_pattern = connect_pattern_by_var[inp]

                    # If the node output is connected to the node input, it
                    # means it is connected to every inner input that the
                    # node input is connected to.
                    if op_connection_pattern[inp_idx][out_idx]:
                        out_connection_pattern = [
                            already or through_inp
                            for already, through_inp
                            in zip(out_connection_pattern,
                                   inp_connection_pattern)]

            # Store the connection pattern of the node output.
            connect_pattern_by_var[out] = out_connection_pattern

    # Obtain the global connection pattern by combining the
    # connection patterns of the individual outputs.
    global_connection_pattern = [[] for _ in range(nb_inputs)]
    for out in outputs:
        out_connection_pattern = connect_pattern_by_var[out]
        for i in range(nb_inputs):
            global_connection_pattern[i].append(out_connection_pattern[i])

    return global_connection_pattern
def is_same_graph(var1, var2, givens=None, debug=False):
    """
    Return True iff Variables `var1` and `var2` perform the same computation.
    By 'performing the same computation', we mean that they must share the same
    graph, so that for instance this function will return False when comparing
    (x * (y * z)) with ((x * y) * z).
    The current implementation is not efficient since, when possible, it
    verifies equality by calling two different functions that are expected to
    return the same output. The goal is to verify this assumption, to
    eventually get rid of one of them in the future.
    Parameters
    ----------
    var1
        The first Variable to compare.
    var2
        The second Variable to compare.
    givens
        Similar to the `givens` argument of `theano.function`, it can be used
        to perform substitutions in the computational graph of `var1` and
        `var2`. This argument is associated to neither `var1` nor `var2`:
        substitutions may affect both graphs if the substituted variable
        is present in both.
    debug : bool
        If True, then an exception is raised when we are in a situation where
        the `equal_computations` implementation cannot be called.
        This parameter is intended to be used in tests only, to make sure we
        properly test both implementations.
    Examples
    --------
    ====== ====== ====== ======
    var1   var2   givens output
    ====== ====== ====== ======
    x + 1  x + 1  {}     True
    x + 1  y + 1  {}     False
    x + 1  y + 1  {x: y} True
    ====== ====== ====== ======
    """
    # Lazy import. The imported callables are cached in module-level globals
    # so the import cost is paid at most once.
    if givens is None:
        givens = {}
    global equal_computations, is_same_graph_with_merge
    if equal_computations is None:
        from theano.gof.opt import is_same_graph_with_merge
        from theano.scan_module.scan_utils import equal_computations
    # Convert `givens` to dictionary.
    if not isinstance(givens, dict):
        givens = dict(givens)
    # Get result from the merge-based function.
    rval1 = is_same_graph_with_merge(var1=var1, var2=var2, givens=givens)
    # Get result from the function `equal_computations` from scan_utils.
    use_equal_computations = True
    if givens:
        # We need to build the `in_xs` and `in_ys` lists. To do this, we need
        # to be able to tell whether a variable belongs to the computational
        # graph of `var1` or `var2`.
        # The typical case we want to handle is when `to_replace` belongs to
        # one of these graphs, and `replace_by` belongs to the other one. In
        # other situations, the current implementation of `equal_computations`
        # is probably not appropriate, so we do not call it.
        ok = True
        in_xs = []
        in_ys = []
        # Compute the sets of all variables found in each computational graph.
        inputs_var = list(map(inputs, ([var1], [var2])))
        all_vars = [set(variables(v_i, v_o))
                    for v_i, v_o in ((inputs_var[0], [var1]),
                                     (inputs_var[1], [var2]))]
        def in_var(x, k):
            # Return True iff `x` is in computation graph of variable `vark`.
            return x in all_vars[k - 1]
        for to_replace, replace_by in iteritems(givens):
            # Map a substitution variable to the computational graphs it
            # belongs to.
            inside = dict((v, [in_var(v, k) for k in (1, 2)])
                          for v in (to_replace, replace_by))
            if (inside[to_replace][0] and not inside[to_replace][1] and
                    inside[replace_by][1] and not inside[replace_by][0]):
                # Substitute variable in `var1` by one from `var2`.
                in_xs.append(to_replace)
                in_ys.append(replace_by)
            elif (inside[to_replace][1] and not inside[to_replace][0] and
                    inside[replace_by][0] and not inside[replace_by][1]):
                # Substitute variable in `var2` by one from `var1`.
                in_xs.append(replace_by)
                in_ys.append(to_replace)
            else:
                # The substitution pair does not cleanly cross from one
                # graph to the other, so `equal_computations` cannot be used.
                ok = False
                break
        if not ok:
            # We cannot directly use `equal_computations`.
            if debug:
                raise AssertionError(
                    'When `debug` is True we want to make sure we are also '
                    'using the `equal_computations` implementation')
            use_equal_computations = False
    else:
        in_xs = None
        in_ys = None
    if use_equal_computations:
        rval2 = equal_computations(xs=[var1], ys=[var2],
                                   in_xs=in_xs, in_ys=in_ys)
        # Both implementations are expected to agree; see the docstring.
        assert rval2 == rval1
    return rval1
def op_as_string(i, op,
                 leaf_formatter=default_leaf_formatter,
                 node_formatter=default_node_formatter):
    """
    Return a single string describing the apply node `op`, with the
    subgraph between `i` and `op.inputs` rendered inline.
    """
    return node_formatter(
        op, as_string(i, op.inputs, leaf_formatter, node_formatter))
def as_string(i, o,
              leaf_formatter=default_leaf_formatter,
              node_formatter=default_node_formatter):
    """
    Return a string representation of the subgraph between i and o.

    Parameters
    ----------
    i : list
        Input `Variable` s.
    o : list
        Output `Variable` s.
    leaf_formatter : function
        Takes a `Variable` and returns a string to describe it.
    node_formatter : function
        Takes an `Op` and the list of strings corresponding to its arguments
        and returns a string to describe it.
    Returns
    -------
    str
        Returns a string representation of the subgraph between i and o. If the
        same op is used by several other ops, the first occurrence will be
        marked as :literal:`*n -> description` and all subsequent occurrences
        will be marked as :literal:`*n`, where n is an id number (ids are
        attributed in an unspecified order and only exist for viewing
        convenience).
    """
    i = set(i)
    orph = orphans(i, o)
    # First pass: find every apply node that is referenced more than once
    # ("multi") so it can be printed once and back-referenced afterwards.
    multi = set()
    seen = set()
    for output in o:
        op = output.owner
        if op in seen:
            multi.add(op)
        else:
            seen.add(op)
    for op in ops(i, o):
        for input in op.inputs:
            op2 = input.owner
            if input in i or input in orph or op2 is None:
                continue
            if op2 in seen:
                multi.add(op2)
            else:
                seen.add(input.owner)
    # Freeze into a list so each shared node gets a stable 1-based index.
    multi = [x for x in multi]
    done = set()
    def multi_index(x):
        return multi.index(x) + 1
    def describe(r):
        # Recursively render variable `r`; shared nodes already rendered
        # ("done") are emitted as back-references (*n).
        if r.owner is not None and r not in i and r not in orph:
            op = r.owner
            idx = op.outputs.index(r)
            if len(op.outputs) == 1:
                idxs = ""
            else:
                # Disambiguate which output of a multi-output node this is.
                idxs = "::%i" % idx
            if op in done:
                return "*%i%s" % (multi_index(op), idxs)
            else:
                done.add(op)
                s = node_formatter(op, [describe(input) for input in op.inputs])
                if op in multi:
                    return "*%i -> %s" % (multi_index(op), s)
                else:
                    return s
        else:
            # Inputs, orphans and ownerless variables are leaves.
            return leaf_formatter(r)
    return [describe(output) for output in o]
def view_roots(r):
    """
    Return the leaf variables reached by recursively following the
    ``view_map`` of the ops that produced `r`; ``[r]`` itself when `r`
    is not a view of anything.
    """
    node = r.owner
    if node is None:
        return [r]
    try:
        raw_view_map = node.op.view_map
    except AttributeError:
        # Op declares no views: r is its own root.
        return [r]
    # Re-key the op-level view_map (output index -> input indices) by the
    # actual output variables of this apply node.
    view_map_by_var = dict((node.outputs[out_idx], in_idxs)
                           for out_idx, in_idxs in raw_view_map.items())
    if r not in view_map_by_var:
        return [r]
    roots = []
    for input_index in view_map_by_var[r]:
        roots += view_roots(node.inputs[input_index])
    return roots
def list_of_nodes(inputs, outputs):
    """
    Return the apply nodes of the graph between `inputs` and `outputs`,
    found by a stack search starting from the owners of the outputs.
    """
    def expand(apply_node):
        # Follow each input back to its owner, but do not walk past nodes
        # that produce one of the requested graph inputs.
        return [inp.owner for inp in apply_node.inputs
                if inp.owner and
                not any(i in inp.owner.outputs for i in inputs)]
    return stack_search(deque([o.owner for o in outputs]), expand)
| mit |
mandeepdhami/horizon | openstack_dashboard/dashboards/project/stacks/urls.py | 56 | 1765 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.project.stacks import views
urlpatterns = patterns(
    '',
    # Stack list / landing page.
    url(r'^$', views.IndexView.as_view(), name='index'),
    # Stack creation workflow: pick a template, then launch.
    url(r'^select_template$',
        views.SelectTemplateView.as_view(),
        name='select_template'),
    url(r'^launch$', views.CreateStackView.as_view(), name='launch'),
    # Dry-run preview of a template / stack before creating it.
    url(r'^preview_template$',
        views.PreviewTemplateView.as_view(), name='preview_template'),
    url(r'^preview$', views.PreviewStackView.as_view(), name='preview'),
    url(r'^preview_details$',
        views.PreviewStackDetailsView.as_view(), name='preview_details'),
    # Detail, update and resource drill-down for an existing stack.
    url(r'^stack/(?P<stack_id>[^/]+)/$',
        views.DetailView.as_view(), name='detail'),
    url(r'^(?P<stack_id>[^/]+)/change_template$',
        views.ChangeTemplateView.as_view(), name='change_template'),
    url(r'^(?P<stack_id>[^/]+)/edit_stack$',
        views.EditStackView.as_view(), name='edit_stack'),
    url(r'^stack/(?P<stack_id>[^/]+)/(?P<resource_name>[^/]+)/$',
        views.ResourceView.as_view(), name='resource'),
    # JSON topology data consumed by the d3 stack-visualization widget.
    url(r'^get_d3_data/(?P<stack_id>[^/]+)/$',
        views.JSONView.as_view(), name='d3_data'),
)
| apache-2.0 |
BhallaLab/moose | moose-examples/traub_2005/py/test_deeplts.py | 4 | 1516 | # test_deeplts.py ---
#
# Filename: test_deeplts.py
# Description:
# Author:
# Maintainer:
# Created: Mon Jul 16 16:12:55 2012 (+0530)
# Version:
# Last-Updated: Thu Nov 8 18:00:47 2012 (+0530)
# By: subha
# Update #: 501
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# Code:
import unittest
from cell_test_util import SingleCellCurrentStepTest
import testutils
import cells
from moose import utils
# Simulation step, plot sampling interval and total simulated duration
# (presumably seconds, matching the other traub_2005 scripts -- confirm
# against cell_test_util).
simdt = 5e-6
plotdt = 0.25e-3
simtime = 1.0
# Earlier multi-step stimulus protocol, kept for reference:
# pulsearray = [[1.0, 100e-3, 1e-9],
#               [0.5, 100e-3, 0.3e-9],
#               [0.5, 100e-3, 0.1e-9],
#               [0.5, 100e-3, -0.1e-9],
#               [0.5, 100e-3, -0.3e-9]]
class TestDeepLTS(SingleCellCurrentStepTest):
    """Current-step-injection test for the DeepLTS cell model."""
    def __init__(self, *args, **kwargs):
        # celltype is assigned before the base __init__ runs -- presumably
        # the base class reads it to build the cell under test; confirm
        # against SingleCellCurrentStepTest.
        self.celltype = 'DeepLTS'
        SingleCellCurrentStepTest.__init__(self, *args, **kwargs)
        # Stimulus tuples -- presumably (delay, width, amplitude); the huge
        # delay of the second entry effectively ends stimulation after the
        # first 1 nA pulse.
        self.pulse_array = [(100e-3, 100e-3, 1e-9),
                            (1e9, 0, 0)]
        # self.solver = 'ee'
        self.simdt = simdt
        self.plotdt = plotdt
    def setUp(self):
        SingleCellCurrentStepTest.setUp(self)
    def testVmSeriesPlot(self):
        """Run the simulation and plot the membrane-potential trace."""
        self.runsim(simtime, pulsearray=self.pulse_array)
        self.plot_vm()
    def testChannelDensities(self):
        # Comparison against the NEURON channel dump is currently disabled.
        pass
        # equal = compare_cell_dump(self.dump_file, '../nrn/'+self.dump_file)
        # self.assertTrue(equal)
if __name__ == '__main__':
    # Discover and run the TestDeepLTS case above with the standard runner.
    unittest.main()
#
# test_deeplts.py ends here
| gpl-3.0 |
MasterOdin/TuringMachine | Python/pylint_runner.py | 1 | 1645 | #!/usr/bin/env python
"""
Runs pylint on all contained python files in this directory, printint out
nice colorized warnings/errors without all the other report fluff
"""
from __future__ import print_function
import os
from pylint.lint import Run
__author__ = "Matthew 'MasterOdin' Peveler"
__license__ = "The MIT License (MIT)"
IGNORE_FOLDERS = [".git", ".idea", "__pycache__"]


def run_runner():
    """
    Run pylint on all python files found under the current directory,
    printing the file list before handing it to pylint.
    """
    pylint_files = get_files_from_dir(os.curdir)
    print("pylint running on the following files:")
    for pylint_file in pylint_files:
        print(pylint_file)
    print("----")
    Run(pylint_files)


def get_files_from_dir(current_dir):
    """
    Recursively walk ``current_dir`` and collect all ``*.py`` files,
    descending into sub-directories unless they are listed in
    IGNORE_FOLDERS.

    Parameters
    ----------
    current_dir : str
        Directory to scan. Passing ``os.curdir`` ('.') yields bare file
        names; any other directory yields paths prefixed with it.

    Returns
    -------
    list of str
        Paths of all python files found below ``current_dir``.
    """
    files = []
    for entry in os.listdir(current_dir):
        # Keep bare names for the top-level directory (historical
        # behavior); join with the directory everywhere else.
        if current_dir != os.curdir:
            entry_path = os.path.join(current_dir, entry)
        else:
            entry_path = entry
        if os.path.isfile(entry_path):
            root, ext = os.path.splitext(entry)
            if root and ext == '.py':
                files.append(entry_path)
        # Bug fix: test the joined path, not the bare entry name.
        # os.path.isdir(entry) resolved relative to the process cwd, so
        # directories nested more than one level deep were never walked;
        # the recursion also has to receive the joined path.
        elif os.path.isdir(entry_path) and entry not in IGNORE_FOLDERS:
            files += get_files_from_dir(entry_path)
    return files
if __name__ == "__main__":
run_runner()
| mit |
lukas-hetzenecker/home-assistant | homeassistant/components/sensor/envisalink.py | 21 | 2764 | """
Support for Envisalink sensors (shows panel info).
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.envisalink/
"""
import logging
from homeassistant.components.envisalink import (EVL_CONTROLLER,
PARTITION_SCHEMA,
CONF_PARTITIONNAME,
EnvisalinkDevice,
SIGNAL_PARTITION_UPDATE,
SIGNAL_KEYPAD_UPDATE)
DEPENDENCIES = ['envisalink']
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
    """Perform the setup for Envisalink sensor devices."""
    configured = discovery_info['partitions']
    for part_num, raw_config in configured.items():
        # Validate each partition's raw config before building the entity.
        part_config = PARTITION_SCHEMA(raw_config)
        sensor = EnvisalinkSensor(
            part_config[CONF_PARTITIONNAME],
            part_num,
            EVL_CONTROLLER.alarm_state['partition'][part_num],
            EVL_CONTROLLER)
        add_devices_callback([sensor])
class EnvisalinkSensor(EnvisalinkDevice):
    """Representation of an Envisalink keypad."""
    def __init__(self, partition_name, partition_number, info, controller):
        """Initialize the sensor."""
        # Imported lazily so pydispatch is only required once a sensor is
        # actually created.
        from pydispatch import dispatcher
        self._icon = 'mdi:alarm'
        self._partition_number = partition_number
        _LOGGER.debug('Setting up sensor for partition: ' + partition_name)
        EnvisalinkDevice.__init__(self,
                                  partition_name + ' Keypad',
                                  info,
                                  controller)
        # Refresh this entity whenever either the partition state or the
        # keypad state changes anywhere in the integration.
        dispatcher.connect(self._update_callback,
                           signal=SIGNAL_PARTITION_UPDATE,
                           sender=dispatcher.Any)
        dispatcher.connect(self._update_callback,
                           signal=SIGNAL_KEYPAD_UPDATE,
                           sender=dispatcher.Any)
    @property
    def icon(self):
        """Return the icon if any."""
        return self._icon
    @property
    def state(self):
        """Return the overall state."""
        # 'alpha' is presumably the keypad's human-readable status text --
        # confirm against pyenvisalink's alarm_state structure.
        return self._info['status']['alpha']
    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return self._info['status']
    def _update_callback(self, partition):
        """Update the partition state in HA, if needed."""
        # partition is None for keypad-wide updates; otherwise only react
        # to events addressed to this sensor's partition.
        if partition is None or int(partition) == self._partition_number:
            self.hass.async_add_job(self.update_ha_state)
| mit |
vinchoi/fishplay | flask/lib/python2.7/site-packages/click/_compat.py | 66 | 20706 | import re
import io
import os
import sys
import codecs
from weakref import WeakKeyDictionary
PY2 = sys.version_info[0] == 2
WIN = sys.platform.startswith('win')
DEFAULT_COLUMNS = 80
_ansi_re = re.compile('\033\[((?:\d|;)*)([a-zA-Z])')
def get_filesystem_encoding():
    """Return the filesystem encoding, falling back to the interpreter's
    default encoding when the platform reports none."""
    fs_encoding = sys.getfilesystemencoding()
    return fs_encoding if fs_encoding else sys.getdefaultencoding()
def _make_text_stream(stream, encoding, errors):
    """Wrap a binary stream in a line-buffered text wrapper that never
    closes the underlying stream."""
    chosen_encoding = (encoding if encoding is not None
                       else get_best_encoding(stream))
    chosen_errors = errors if errors is not None else 'replace'
    return _NonClosingTextIOWrapper(stream, chosen_encoding, chosen_errors,
                                    line_buffering=True)
def is_ascii_encoding(encoding):
    """Checks if a given encoding is ascii."""
    try:
        codec_info = codecs.lookup(encoding)
    except LookupError:
        # Unknown codec names simply count as "not ascii".
        return False
    return codec_info.name == 'ascii'
def get_best_encoding(stream):
    """Return the stream's encoding, upgraded to UTF-8 whenever the
    stream reports (or defaults to) plain ASCII."""
    encoding = getattr(stream, 'encoding', None) or sys.getdefaultencoding()
    if is_ascii_encoding(encoding):
        return 'utf-8'
    return encoding
class _NonClosingTextIOWrapper(io.TextIOWrapper):
    """TextIOWrapper that detaches (instead of closing) the underlying
    stream on destruction, so wrapping stdio never closes stdio."""
    def __init__(self, stream, encoding, errors, **extra):
        # Wrap in _FixupStream so streams missing parts of the io protocol
        # (read1/readable/...) still satisfy TextIOWrapper.
        self._stream = stream = _FixupStream(stream)
        io.TextIOWrapper.__init__(self, stream, encoding, errors, **extra)
    # The io module is a place where the Python 3 text behavior
    # was forced upon Python 2, so we need to unbreak
    # it to look like Python 2.
    if PY2:
        def write(self, x):
            # On PY2, native str / binary data bypasses the text layer and
            # is written straight to the buffer (after flushing text state).
            if isinstance(x, str) or is_bytes(x):
                try:
                    self.flush()
                except Exception:
                    pass
                return self.buffer.write(str(x))
            return io.TextIOWrapper.write(self, x)
        def writelines(self, lines):
            for line in lines:
                self.write(line)
    def __del__(self):
        try:
            # Detach rather than close so the wrapped stream survives.
            self.detach()
        except Exception:
            pass
    def isatty(self):
        # https://bitbucket.org/pypy/pypy/issue/1803
        return self._stream.isatty()
class _FixupStream(object):
    """The new io interface needs more from streams than streams
    traditionally implement. As such, this fix-up code is necessary in
    some circumstances.
    """
    def __init__(self, stream):
        self._stream = stream
    def __getattr__(self, name):
        # Delegate everything not explicitly fixed up to the real stream.
        return getattr(self._stream, name)
    def read1(self, size):
        """read1 required by TextIOWrapper; emulate it when missing."""
        f = getattr(self._stream, 'read1', None)
        if f is not None:
            return f(size)
        # We only dispatch to readline instead of read in Python 2 as we
        # do not want cause problems with the different implementation
        # of line buffering.
        if PY2:
            return self._stream.readline(size)
        return self._stream.read(size)
    def readable(self):
        """Prefer the stream's own readable(); otherwise probe with a
        zero-byte read."""
        x = getattr(self._stream, 'readable', None)
        if x is not None:
            return x()
        try:
            self._stream.read(0)
        except Exception:
            return False
        return True
    def writable(self):
        """Prefer the stream's own writable(); otherwise probe with an
        empty write (text first, then bytes)."""
        x = getattr(self._stream, 'writable', None)
        if x is not None:
            return x()
        try:
            self._stream.write('')
        except Exception:
            try:
                self._stream.write(b'')
            except Exception:
                return False
        return True
    def seekable(self):
        """Prefer the stream's own seekable(); otherwise probe by seeking
        to the current position."""
        x = getattr(self._stream, 'seekable', None)
        if x is not None:
            return x()
        try:
            self._stream.seek(self._stream.tell())
        except Exception:
            return False
        return True
if PY2:
    # Python 2 branch: aliases that let the rest of Click be written once
    # for both major versions.
    text_type = unicode
    bytes = str
    raw_input = raw_input
    string_types = (str, unicode)
    iteritems = lambda x: x.iteritems()
    range_type = xrange
    def is_bytes(x):
        # On PY2, buffer/bytearray are the binary types besides str itself.
        return isinstance(x, (buffer, bytearray))
    _identifier_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
    # For Windows, we need to force stdout/stdin/stderr to binary if it's
    # fetched for that. This obviously is not the most correct way to do
    # it as it changes global state. Unfortunately, there does not seem to
    # be a clear better way to do it as just reopening the file in binary
    # mode does not change anything.
    #
    # An option would be to do what Python 3 does and to open the file as
    # binary only, patch it back to the system, and then use a wrapper
    # stream that converts newlines. It's not quite clear what's the
    # correct option here.
    #
    # This code also lives in _winconsole for the fallback to the console
    # emulation stream.
    if WIN:
        import msvcrt
        def set_binary_mode(f):
            try:
                fileno = f.fileno()
            except Exception:
                pass
            else:
                msvcrt.setmode(fileno, os.O_BINARY)
            return f
    else:
        # POSIX streams are already binary-safe; no-op.
        set_binary_mode = lambda x: x
    def isidentifier(x):
        return _identifier_re.search(x) is not None
    def get_binary_stdin():
        return set_binary_mode(sys.stdin)
    def get_binary_stdout():
        return set_binary_mode(sys.stdout)
    def get_binary_stderr():
        return set_binary_mode(sys.stderr)
    def get_text_stdin(encoding=None, errors=None):
        # Prefer the dedicated Windows console stream when available.
        rv = _get_windows_console_stream(sys.stdin, encoding, errors)
        if rv is not None:
            return rv
        return _make_text_stream(sys.stdin, encoding, errors)
    def get_text_stdout(encoding=None, errors=None):
        rv = _get_windows_console_stream(sys.stdout, encoding, errors)
        if rv is not None:
            return rv
        return _make_text_stream(sys.stdout, encoding, errors)
    def get_text_stderr(encoding=None, errors=None):
        rv = _get_windows_console_stream(sys.stderr, encoding, errors)
        if rv is not None:
            return rv
        return _make_text_stream(sys.stderr, encoding, errors)
    def filename_to_ui(value):
        # Filenames arrive as bytes on PY2; decode them for display only.
        if isinstance(value, bytes):
            value = value.decode(get_filesystem_encoding(), 'replace')
        return value
else:
    # Python 3 branch.
    import io
    text_type = str
    raw_input = input
    string_types = (str,)
    range_type = range
    isidentifier = lambda x: x.isidentifier()
    iteritems = lambda x: iter(x.items())
    def is_bytes(x):
        return isinstance(x, (bytes, memoryview, bytearray))
    def _is_binary_reader(stream, default=False):
        try:
            return isinstance(stream.read(0), bytes)
        except Exception:
            return default
            # This happens in some cases where the stream was already
            # closed. In this case, we assume the default.
    def _is_binary_writer(stream, default=False):
        try:
            stream.write(b'')
        except Exception:
            try:
                stream.write('')
                return False
            except Exception:
                pass
            return default
        return True
    def _find_binary_reader(stream):
        # We need to figure out if the given stream is already binary.
        # This can happen because the official docs recommend detaching
        # the streams to get binary streams. Some code might do this, so
        # we need to deal with this case explicitly.
        if _is_binary_reader(stream, False):
            return stream
        buf = getattr(stream, 'buffer', None)
        # Same situation here; this time we assume that the buffer is
        # actually binary in case it's closed.
        if buf is not None and _is_binary_reader(buf, True):
            return buf
    def _find_binary_writer(stream):
        # We need to figure out if the given stream is already binary.
        # This can happen because the official docs recommend detatching
        # the streams to get binary streams. Some code might do this, so
        # we need to deal with this case explicitly.
        if _is_binary_writer(stream, False):
            return stream
        buf = getattr(stream, 'buffer', None)
        # Same situation here; this time we assume that the buffer is
        # actually binary in case it's closed.
        if buf is not None and _is_binary_writer(buf, True):
            return buf
    def _stream_is_misconfigured(stream):
        """A stream is misconfigured if its encoding is ASCII."""
        # If the stream does not have an encoding set, we assume it's set
        # to ASCII. This appears to happen in certain unittest
        # environments. It's not quite clear what the correct behavior is
        # but this at least will force Click to recover somehow.
        return is_ascii_encoding(getattr(stream, 'encoding', None) or 'ascii')
    def _is_compatible_text_stream(stream, encoding, errors):
        stream_encoding = getattr(stream, 'encoding', None)
        stream_errors = getattr(stream, 'errors', None)
        # Perfect match.
        if stream_encoding == encoding and stream_errors == errors:
            return True
        # Otherwise, it's only a compatible stream if we did not ask for
        # an encoding.
        if encoding is None:
            return stream_encoding is not None
        return False
    def _force_correct_text_reader(text_reader, encoding, errors):
        if _is_binary_reader(text_reader, False):
            binary_reader = text_reader
        else:
            # If there is no target encoding set, we need to verify that the
            # reader is not actually misconfigured.
            if encoding is None and not _stream_is_misconfigured(text_reader):
                return text_reader
            if _is_compatible_text_stream(text_reader, encoding, errors):
                return text_reader
            # If the reader has no encoding, we try to find the underlying
            # binary reader for it. If that fails because the environment is
            # misconfigured, we silently go with the same reader because this
            # is too common to happen. In that case, mojibake is better than
            # exceptions.
            binary_reader = _find_binary_reader(text_reader)
            if binary_reader is None:
                return text_reader
        # At this point, we default the errors to replace instead of strict
        # because nobody handles those errors anyways and at this point
        # we're so fundamentally fucked that nothing can repair it.
        if errors is None:
            errors = 'replace'
        return _make_text_stream(binary_reader, encoding, errors)
    def _force_correct_text_writer(text_writer, encoding, errors):
        if _is_binary_writer(text_writer, False):
            binary_writer = text_writer
        else:
            # If there is no target encoding set, we need to verify that the
            # writer is not actually misconfigured.
            if encoding is None and not _stream_is_misconfigured(text_writer):
                return text_writer
            if _is_compatible_text_stream(text_writer, encoding, errors):
                return text_writer
            # If the writer has no encoding, we try to find the underlying
            # binary writer for it. If that fails because the environment is
            # misconfigured, we silently go with the same writer because this
            # is too common to happen. In that case, mojibake is better than
            # exceptions.
            binary_writer = _find_binary_writer(text_writer)
            if binary_writer is None:
                return text_writer
        # At this point, we default the errors to replace instead of strict
        # because nobody handles those errors anyways and at this point
        # we're so fundamentally fucked that nothing can repair it.
        if errors is None:
            errors = 'replace'
        return _make_text_stream(binary_writer, encoding, errors)
    def get_binary_stdin():
        reader = _find_binary_reader(sys.stdin)
        if reader is None:
            raise RuntimeError('Was not able to determine binary '
                               'stream for sys.stdin.')
        return reader
    def get_binary_stdout():
        writer = _find_binary_writer(sys.stdout)
        if writer is None:
            raise RuntimeError('Was not able to determine binary '
                               'stream for sys.stdout.')
        return writer
    def get_binary_stderr():
        writer = _find_binary_writer(sys.stderr)
        if writer is None:
            raise RuntimeError('Was not able to determine binary '
                               'stream for sys.stderr.')
        return writer
    def get_text_stdin(encoding=None, errors=None):
        rv = _get_windows_console_stream(sys.stdin, encoding, errors)
        if rv is not None:
            return rv
        return _force_correct_text_reader(sys.stdin, encoding, errors)
    def get_text_stdout(encoding=None, errors=None):
        rv = _get_windows_console_stream(sys.stdout, encoding, errors)
        if rv is not None:
            return rv
        return _force_correct_text_writer(sys.stdout, encoding, errors)
    def get_text_stderr(encoding=None, errors=None):
        rv = _get_windows_console_stream(sys.stderr, encoding, errors)
        if rv is not None:
            return rv
        return _force_correct_text_writer(sys.stderr, encoding, errors)
    def filename_to_ui(value):
        if isinstance(value, bytes):
            value = value.decode(get_filesystem_encoding(), 'replace')
        else:
            # Round-trip through surrogateescape so undecodable bytes in
            # the filename render as replacement chars instead of raising.
            value = value.encode('utf-8', 'surrogateescape') \
                .decode('utf-8', 'replace')
        return value
def get_streerror(e, default=None):
    """Return a best-effort human-readable message for exception `e`,
    preferring its OS-level ``strerror`` and decoding bytes safely."""
    try:
        msg = e.strerror
    except AttributeError:
        # Not an OS error: fall back to the caller's default or str(e).
        msg = default if default is not None else str(e)
    if isinstance(msg, bytes):
        msg = msg.decode('utf-8', 'replace')
    return msg
def open_stream(filename, mode='r', encoding=None, errors='strict',
                atomic=False):
    """Open `filename` and return ``(stream, should_close)``.

    ``'-'`` maps to the appropriate standard stream (and should_close is
    False); with ``atomic=True`` writes go to a temp file in the same
    directory that is renamed over the target on close.
    """
    # Standard streams first. These are simple because they don't need
    # special handling for the atomic flag. It's entirely ignored.
    if filename == '-':
        if 'w' in mode:
            if 'b' in mode:
                return get_binary_stdout(), False
            return get_text_stdout(encoding=encoding, errors=errors), False
        if 'b' in mode:
            return get_binary_stdin(), False
        return get_text_stdin(encoding=encoding, errors=errors), False
    # Non-atomic writes directly go out through the regular open functions.
    if not atomic:
        if encoding is None:
            return open(filename, mode), True
        return io.open(filename, mode, encoding=encoding, errors=errors), True
    # Some usability stuff for atomic writes: only plain 'w' modes make
    # sense, so reject the alternatives with explicit errors.
    if 'a' in mode:
        raise ValueError(
            'Appending to an existing file is not supported, because that '
            'would involve an expensive `copy`-operation to a temporary '
            'file. Open the file in normal `w`-mode and copy explicitly '
            'if that\'s what you\'re after.'
        )
    if 'x' in mode:
        raise ValueError('Use the `overwrite`-parameter instead.')
    if 'w' not in mode:
        raise ValueError('Atomic writes only make sense with `w`-mode.')
    # Atomic writes are more complicated. They work by opening a file
    # as a proxy in the same folder and then using the fdopen
    # functionality to wrap it in a Python file. Then we wrap it in an
    # atomic file that moves the file over on close.
    import tempfile
    fd, tmp_filename = tempfile.mkstemp(dir=os.path.dirname(filename),
                                        prefix='.__atomic-write')
    if encoding is not None:
        f = io.open(fd, mode, encoding=encoding, errors=errors)
    else:
        f = os.fdopen(fd, mode)
    return _AtomicFile(f, tmp_filename, filename), True
# Used in a destructor call, needs extra protection from interpreter cleanup.
# os.replace (3.3+) overwrites atomically on every platform; os.rename only
# does so on POSIX, hence _can_replace is False on Windows without it.
if hasattr(os, 'replace'):
    _replace = os.replace
    _can_replace = True
else:
    _replace = os.rename
    _can_replace = not WIN
class _AtomicFile(object):
def __init__(self, f, tmp_filename, real_filename):
self._f = f
self._tmp_filename = tmp_filename
self._real_filename = real_filename
self.closed = False
@property
def name(self):
return self._real_filename
def close(self, delete=False):
if self.closed:
return
self._f.close()
if not _can_replace:
try:
os.remove(self._real_filename)
except OSError:
pass
_replace(self._tmp_filename, self._real_filename)
self.closed = True
def __getattr__(self, name):
return getattr(self._f, name)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close(delete=exc_type is not None)
def __repr__(self):
return repr(self._f)
# Placeholders overwritten below when running on Windows with colorama
# available; on other platforms they stay None.
auto_wrap_for_ansi = None
colorama = None
get_winterm_size = None
def strip_ansi(value):
    """Remove all ANSI escape sequences from `value`."""
    return _ansi_re.sub('', value)
def should_strip_ansi(stream=None, color=None):
    """Decide whether ANSI codes should be stripped: honor an explicit
    `color` flag, otherwise strip when the stream is not a terminal."""
    if color is not None:
        return not color
    target = sys.stdin if stream is None else stream
    return not isatty(target)
# If we're on Windows, we provide transparent integration through
# colorama. This will make ANSI colors through the echo function
# work automatically.
if WIN:
    # Windows has a smaller terminal
    DEFAULT_COLUMNS = 79
    from ._winconsole import _get_windows_console_stream
    def _get_argv_encoding():
        import locale
        return locale.getpreferredencoding()
    if PY2:
        def raw_input(prompt=''):
            # Route the prompt through the fixed-up text streams so it is
            # encoded correctly on the Windows console.
            sys.stderr.flush()
            if prompt:
                stdout = _default_text_stdout()
                stdout.write(prompt)
            stdin = _default_text_stdin()
            return stdin.readline().rstrip('\r\n')
    try:
        import colorama
    except ImportError:
        # Without colorama the placeholders defined above stay None.
        pass
    else:
        _ansi_stream_wrappers = WeakKeyDictionary()
        def auto_wrap_for_ansi(stream, color=None):
            """This function wraps a stream so that calls through colorama
            are issued to the win32 console API to recolor on demand. It
            also ensures to reset the colors if a write call is interrupted
            to not destroy the console afterwards.
            """
            try:
                cached = _ansi_stream_wrappers.get(stream)
            except Exception:
                cached = None
            if cached is not None:
                return cached
            strip = should_strip_ansi(stream, color)
            ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip)
            rv = ansi_wrapper.stream
            _write = rv.write
            def _safe_write(s):
                try:
                    return _write(s)
                except:
                    # Reset console colors before propagating, so an
                    # interrupted write cannot leave the console stained.
                    ansi_wrapper.reset_all()
                    raise
            rv.write = _safe_write
            try:
                _ansi_stream_wrappers[stream] = rv
            except Exception:
                pass
            return rv
        def get_winterm_size():
            win = colorama.win32.GetConsoleScreenBufferInfo(
                colorama.win32.STDOUT).srWindow
            return win.Right - win.Left, win.Bottom - win.Top
else:
    def _get_argv_encoding():
        return getattr(sys.stdin, 'encoding', None) or get_filesystem_encoding()
    # No Windows console handling needed outside Windows.
    _get_windows_console_stream = lambda *x: None
def term_len(x):
    """Length of *x* as printed on the terminal (ANSI sequences excluded)."""
    return len(strip_ansi(x))
def isatty(stream):
    """Safely report whether *stream* is attached to a terminal.

    Any failure (missing ``isatty`` attribute, closed stream, exotic
    file-like object) is treated as "not a tty".
    """
    try:
        result = stream.isatty()
    except Exception:
        return False
    return result
def _make_cached_stream_func(src_func, wrapper_func):
cache = WeakKeyDictionary()
def func():
stream = src_func()
try:
rv = cache.get(stream)
except Exception:
rv = None
if rv is not None:
return rv
rv = wrapper_func()
try:
cache[stream] = rv
except Exception:
pass
return rv
return func
# Cached accessors for the standard text streams; the wrapper is rebuilt
# lazily whenever sys.stdin/stdout/stderr is replaced.
_default_text_stdin = _make_cached_stream_func(
    lambda: sys.stdin, get_text_stdin)
_default_text_stdout = _make_cached_stream_func(
    lambda: sys.stdout, get_text_stdout)
_default_text_stderr = _make_cached_stream_func(
    lambda: sys.stderr, get_text_stderr)

# Lookup tables mapping a stream name to its opener function.
binary_streams = {
    'stdin': get_binary_stdin,
    'stdout': get_binary_stdout,
    'stderr': get_binary_stderr,
}

text_streams = {
    'stdin': get_text_stdin,
    'stdout': get_text_stdout,
    'stderr': get_text_stderr,
}
| gpl-3.0 |
jamespcole/home-assistant | homeassistant/components/geo_location/__init__.py | 9 | 2057 | """Support for Geolocation."""
from datetime import timedelta
import logging
from typing import Optional
from homeassistant.const import ATTR_LATITUDE, ATTR_LONGITUDE
from homeassistant.helpers.config_validation import ( # noqa
PLATFORM_SCHEMA, PLATFORM_SCHEMA_BASE)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
_LOGGER = logging.getLogger(__name__)

# Extra state-attribute keys exposed by geolocation entities.
ATTR_DISTANCE = 'distance'
ATTR_SOURCE = 'source'

DOMAIN = 'geo_location'

ENTITY_ID_FORMAT = DOMAIN + '.{}'

# Polling interval handed to the EntityComponent below.
SCAN_INTERVAL = timedelta(seconds=60)
async def async_setup(hass, config):
    """Set up the Geolocation component.

    Delegates platform discovery and periodic entity updates to an
    EntityComponent polling at SCAN_INTERVAL.
    """
    component = EntityComponent(_LOGGER, DOMAIN, hass, SCAN_INTERVAL)
    await component.async_setup(config)
    return True
class GeolocationEvent(Entity):
    """This represents an external event with an associated geolocation.

    Platforms override ``source`` (mandatory) and, where known,
    ``distance``, ``latitude`` and ``longitude``.
    """

    @property
    def state(self):
        """Return the state of the sensor.

        The state is the distance rounded to one decimal, or None when
        no distance is known.
        """
        if self.distance is not None:
            return round(self.distance, 1)
        return None

    @property
    def source(self) -> str:
        """Return source value of this external event."""
        # Concrete platforms must override this property.
        raise NotImplementedError

    @property
    def distance(self) -> Optional[float]:
        """Return distance value of this external event."""
        return None

    @property
    def latitude(self) -> Optional[float]:
        """Return latitude value of this external event."""
        return None

    @property
    def longitude(self) -> Optional[float]:
        """Return longitude value of this external event."""
        return None

    @property
    def state_attributes(self):
        """Return the state attributes of this external event."""
        data = {}
        if self.latitude is not None:
            data[ATTR_LATITUDE] = round(self.latitude, 5)
        if self.longitude is not None:
            data[ATTR_LONGITUDE] = round(self.longitude, 5)
        if self.source is not None:
            data[ATTR_SOURCE] = self.source
        return data
| apache-2.0 |
40223119/cdaw11 | static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_suite.py | 791 | 12066 | import unittest
import sys
from .support import LoggingResult, TestEquality
### Support code for Test_TestSuite
################################################################
class Test(object):
    """Namespace holding the TestCase used by the suite-building helpers."""

    class Foo(unittest.TestCase):
        # Three named test methods plus runTest so Foo can also serve as
        # a single-test case.
        def test_1(self): pass
        def test_2(self): pass
        def test_3(self): pass
        def runTest(self): pass
def _mk_TestSuite(*names):
    """Build a TestSuite of Test.Foo instances, one per test-method name."""
    return unittest.TestSuite(Test.Foo(n) for n in names)
################################################################
class Test_TestSuite(unittest.TestCase, TestEquality):
    """Exercise unittest.TestSuite: construction, counting, iteration,
    running, addTest/addTests validation and BaseTestSuite behaviour."""

    ### Set up attributes needed by inherited tests
    ################################################################

    # Used by TestEquality.test_eq
    eq_pairs = [(unittest.TestSuite(), unittest.TestSuite())
               ,(unittest.TestSuite(), unittest.TestSuite([]))
               ,(_mk_TestSuite('test_1'), _mk_TestSuite('test_1'))]

    # Used by TestEquality.test_ne
    ne_pairs = [(unittest.TestSuite(), _mk_TestSuite('test_1'))
               ,(unittest.TestSuite([]), _mk_TestSuite('test_1'))
               ,(_mk_TestSuite('test_1', 'test_2'), _mk_TestSuite('test_1', 'test_3'))
               ,(_mk_TestSuite('test_1'), _mk_TestSuite('test_2'))]

    ################################################################
    ### /Set up attributes needed by inherited tests

    ### Tests for TestSuite.__init__
    ################################################################

    # "class TestSuite([tests])"
    #
    # The tests iterable should be optional
    def test_init__tests_optional(self):
        suite = unittest.TestSuite()

        self.assertEqual(suite.countTestCases(), 0)

    # "class TestSuite([tests])"
    # ...
    # "If tests is given, it must be an iterable of individual test cases
    # or other test suites that will be used to build the suite initially"
    #
    # TestSuite should deal with empty tests iterables by allowing the
    # creation of an empty suite
    def test_init__empty_tests(self):
        suite = unittest.TestSuite([])

        self.assertEqual(suite.countTestCases(), 0)

    # "class TestSuite([tests])"
    # ...
    # "If tests is given, it must be an iterable of individual test cases
    # or other test suites that will be used to build the suite initially"
    #
    # TestSuite should allow any iterable to provide tests
    def test_init__tests_from_any_iterable(self):
        def tests():
            yield unittest.FunctionTestCase(lambda: None)
            yield unittest.FunctionTestCase(lambda: None)

        suite_1 = unittest.TestSuite(tests())
        self.assertEqual(suite_1.countTestCases(), 2)

        suite_2 = unittest.TestSuite(suite_1)
        self.assertEqual(suite_2.countTestCases(), 2)

        suite_3 = unittest.TestSuite(set(suite_1))
        self.assertEqual(suite_3.countTestCases(), 2)

    # "class TestSuite([tests])"
    # ...
    # "If tests is given, it must be an iterable of individual test cases
    # or other test suites that will be used to build the suite initially"
    #
    # Does TestSuite() also allow other TestSuite() instances to be present
    # in the tests iterable?
    def test_init__TestSuite_instances_in_tests(self):
        def tests():
            ftc = unittest.FunctionTestCase(lambda: None)
            yield unittest.TestSuite([ftc])
            yield unittest.FunctionTestCase(lambda: None)

        suite = unittest.TestSuite(tests())
        self.assertEqual(suite.countTestCases(), 2)

    ################################################################
    ### /Tests for TestSuite.__init__

    # Container types should support the iter protocol
    def test_iter(self):
        test1 = unittest.FunctionTestCase(lambda: None)
        test2 = unittest.FunctionTestCase(lambda: None)
        suite = unittest.TestSuite((test1, test2))

        self.assertEqual(list(suite), [test1, test2])

    # "Return the number of tests represented by the this test object.
    # ...this method is also implemented by the TestSuite class, which can
    # return larger [greater than 1] values"
    #
    # Presumably an empty TestSuite returns 0?
    def test_countTestCases_zero_simple(self):
        suite = unittest.TestSuite()

        self.assertEqual(suite.countTestCases(), 0)

    # "Return the number of tests represented by the this test object.
    # ...this method is also implemented by the TestSuite class, which can
    # return larger [greater than 1] values"
    #
    # Presumably an empty TestSuite (even if it contains other empty
    # TestSuite instances) returns 0?
    def test_countTestCases_zero_nested(self):
        class Test1(unittest.TestCase):
            def test(self):
                pass

        suite = unittest.TestSuite([unittest.TestSuite()])

        self.assertEqual(suite.countTestCases(), 0)

    # "Return the number of tests represented by the this test object.
    # ...this method is also implemented by the TestSuite class, which can
    # return larger [greater than 1] values"
    def test_countTestCases_simple(self):
        test1 = unittest.FunctionTestCase(lambda: None)
        test2 = unittest.FunctionTestCase(lambda: None)
        suite = unittest.TestSuite((test1, test2))

        self.assertEqual(suite.countTestCases(), 2)

    # "Return the number of tests represented by the this test object.
    # ...this method is also implemented by the TestSuite class, which can
    # return larger [greater than 1] values"
    #
    # Make sure this holds for nested TestSuite instances, too
    def test_countTestCases_nested(self):
        class Test1(unittest.TestCase):
            def test1(self): pass
            def test2(self): pass

        test2 = unittest.FunctionTestCase(lambda: None)
        test3 = unittest.FunctionTestCase(lambda: None)
        child = unittest.TestSuite((Test1('test2'), test2))
        parent = unittest.TestSuite((test3, child, Test1('test1')))

        self.assertEqual(parent.countTestCases(), 4)

    # "Run the tests associated with this suite, collecting the result into
    # the test result object passed as result."
    #
    # And if there are no tests? What then?
    def test_run__empty_suite(self):
        events = []
        result = LoggingResult(events)

        suite = unittest.TestSuite()

        suite.run(result)

        self.assertEqual(events, [])

    # "Note that unlike TestCase.run(), TestSuite.run() requires the
    # "result object to be passed in."
    def test_run__requires_result(self):
        suite = unittest.TestSuite()

        try:
            suite.run()
        except TypeError:
            pass
        else:
            self.fail("Failed to raise TypeError")

    # "Run the tests associated with this suite, collecting the result into
    # the test result object passed as result."
    def test_run(self):
        events = []
        result = LoggingResult(events)

        class LoggingCase(unittest.TestCase):
            def run(self, result):
                events.append('run %s' % self._testMethodName)

            def test1(self): pass
            def test2(self): pass

        tests = [LoggingCase('test1'), LoggingCase('test2')]

        unittest.TestSuite(tests).run(result)

        self.assertEqual(events, ['run test1', 'run test2'])

    # "Add a TestCase ... to the suite"
    def test_addTest__TestCase(self):
        class Foo(unittest.TestCase):
            def test(self): pass

        test = Foo('test')
        suite = unittest.TestSuite()

        suite.addTest(test)

        self.assertEqual(suite.countTestCases(), 1)
        self.assertEqual(list(suite), [test])

    # "Add a ... TestSuite to the suite"
    def test_addTest__TestSuite(self):
        class Foo(unittest.TestCase):
            def test(self): pass

        suite_2 = unittest.TestSuite([Foo('test')])

        suite = unittest.TestSuite()
        suite.addTest(suite_2)

        self.assertEqual(suite.countTestCases(), 1)
        self.assertEqual(list(suite), [suite_2])

    # "Add all the tests from an iterable of TestCase and TestSuite
    # instances to this test suite."
    #
    # "This is equivalent to iterating over tests, calling addTest() for
    # each element"
    def test_addTests(self):
        class Foo(unittest.TestCase):
            def test_1(self): pass
            def test_2(self): pass

        test_1 = Foo('test_1')
        test_2 = Foo('test_2')
        inner_suite = unittest.TestSuite([test_2])

        def gen():
            yield test_1
            yield test_2
            yield inner_suite

        suite_1 = unittest.TestSuite()
        suite_1.addTests(gen())

        self.assertEqual(list(suite_1), list(gen()))

        # "This is equivalent to iterating over tests, calling addTest() for
        # each element"
        suite_2 = unittest.TestSuite()
        for t in gen():
            suite_2.addTest(t)

        self.assertEqual(suite_1, suite_2)

    # "Add all the tests from an iterable of TestCase and TestSuite
    # instances to this test suite."
    #
    # What happens if it doesn't get an iterable?
    def test_addTest__noniterable(self):
        suite = unittest.TestSuite()

        try:
            suite.addTests(5)
        except TypeError:
            pass
        else:
            self.fail("Failed to raise TypeError")

    def test_addTest__noncallable(self):
        suite = unittest.TestSuite()
        self.assertRaises(TypeError, suite.addTest, 5)

    def test_addTest__casesuiteclass(self):
        # Classes (rather than instances) must be rejected.
        suite = unittest.TestSuite()
        self.assertRaises(TypeError, suite.addTest, Test_TestSuite)
        self.assertRaises(TypeError, suite.addTest, unittest.TestSuite)

    def test_addTests__string(self):
        suite = unittest.TestSuite()
        self.assertRaises(TypeError, suite.addTests, "foo")

    def test_function_in_suite(self):
        def f(_):
            pass
        suite = unittest.TestSuite()
        suite.addTest(f)

        # when the bug is fixed this line will not crash
        suite.run(unittest.TestResult())

    def test_basetestsuite(self):
        # BaseTestSuite must NOT run class- or module-level fixtures.
        class Test(unittest.TestCase):
            wasSetUp = False
            wasTornDown = False
            @classmethod
            def setUpClass(cls):
                cls.wasSetUp = True
            @classmethod
            def tearDownClass(cls):
                cls.wasTornDown = True
            def testPass(self):
                pass
            def testFail(self):
                fail
        class Module(object):
            wasSetUp = False
            wasTornDown = False
            @staticmethod
            def setUpModule():
                Module.wasSetUp = True
            @staticmethod
            def tearDownModule():
                Module.wasTornDown = True

        Test.__module__ = 'Module'
        sys.modules['Module'] = Module
        self.addCleanup(sys.modules.pop, 'Module')

        suite = unittest.BaseTestSuite()
        suite.addTests([Test('testPass'), Test('testFail')])
        self.assertEqual(suite.countTestCases(), 2)

        result = unittest.TestResult()
        suite.run(result)
        self.assertFalse(Module.wasSetUp)
        self.assertFalse(Module.wasTornDown)
        self.assertFalse(Test.wasSetUp)
        self.assertFalse(Test.wasTornDown)
        self.assertEqual(len(result.errors), 1)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 2)

    def test_overriding_call(self):
        class MySuite(unittest.TestSuite):
            called = False
            def __call__(self, *args, **kw):
                self.called = True
                unittest.TestSuite.__call__(self, *args, **kw)

        suite = MySuite()
        result = unittest.TestResult()
        wrapper = unittest.TestSuite()
        wrapper.addTest(suite)
        wrapper(result)
        self.assertTrue(suite.called)

        # reusing results should be permitted even if abominable
        self.assertFalse(result._testRunEntered)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
proxysh/Safejumper-for-Mac | buildmac/Resources/env/lib/python2.7/site-packages/twisted/protocols/postfix.py | 15 | 3742 | # -*- test-case-name: twisted.test.test_postfix -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Postfix mail transport agent related protocols.
"""
import sys
try:
# Python 2
from UserDict import UserDict
except ImportError:
# Python 3
from collections import UserDict
try:
# Python 2
from urllib import quote as _quote, unquote as _unquote
except ImportError:
# Python 3
from urllib.parse import quote as _quote, unquote as _unquote
from twisted.protocols import basic
from twisted.protocols import policies
from twisted.internet import protocol, defer
from twisted.python import log
from twisted.python.compat import intToBytes, nativeString, networkString
# urllib's quote functions just happen to match
# the postfix semantics.
def quote(s):
    """Percent-encode *s* and return the result as a byte string."""
    return networkString(_quote(s))
def unquote(s):
    """Decode a percent-encoded value and return it as a byte string."""
    return networkString(_unquote(nativeString(s)))
class PostfixTCPMapServer(basic.LineReceiver, policies.TimeoutMixin):
    """Postfix mail transport agent TCP map protocol implementation.

    Receive requests for data matching given key via lineReceived,
    asks it's factory for the data with self.factory.get(key), and
    returns the data to the requester. None means no entry found.

    You can use postfix's postmap to test the map service::

        /usr/sbin/postmap -q KEY tcp:localhost:4242

    """

    # Idle connections are dropped after this many seconds.
    timeout = 600
    delimiter = b'\n'

    def connectionMade(self):
        self.setTimeout(self.timeout)

    def sendCode(self, code, message=b''):
        """
        Send an SMTP-like code with a message.
        """
        self.sendLine(intToBytes(code) + b' ' + message)

    def lineReceived(self, line):
        self.resetTimeout()
        try:
            request, params = line.split(None, 1)
        except ValueError:
            request = line
            params = None
        try:
            # Dispatch on the request verb, e.g. b'get' -> do_get.
            f = getattr(self, 'do_' + nativeString(request))
        except AttributeError:
            self.sendCode(400, b'unknown command')
        else:
            try:
                f(params)
            except:
                # Report any handler failure to the client as a 400.
                self.sendCode(400, b'Command ' + request + b' failed: ' +
                              networkString(str(sys.exc_info()[1])))

    def do_get(self, key):
        if key is None:
            self.sendCode(400, b"Command 'get' takes 1 parameters.")
        else:
            d = defer.maybeDeferred(self.factory.get, key)
            d.addCallbacks(self._cbGot, self._cbNot)
            d.addErrback(log.err)

    def _cbNot(self, fail):
        self.sendCode(400, fail.getErrorMessage())

    def _cbGot(self, value):
        # None from the factory means "no such key".
        if value is None:
            self.sendCode(500)
        else:
            self.sendCode(200, quote(value))

    def do_put(self, keyAndValue):
        if keyAndValue is None:
            self.sendCode(400, b"Command 'put' takes 2 parameters.")
        else:
            try:
                key, value = keyAndValue.split(None, 1)
            except ValueError:
                self.sendCode(400, b"Command 'put' takes 2 parameters.")
            else:
                self.sendCode(500, b'put is not implemented yet.')
class PostfixTCPMapDictServerFactory(UserDict, protocol.ServerFactory):
    """An in-memory dictionary factory for PostfixTCPMapServer."""
    # UserDict supplies the .get(key) interface the protocol relies on.
    protocol = PostfixTCPMapServer
class PostfixTCPMapDeferringDictServerFactory(protocol.ServerFactory):
    """
    An in-memory dictionary factory for PostfixTCPMapServer.
    """
    protocol = PostfixTCPMapServer

    def __init__(self, data=None):
        self.data = {}
        if data is not None:
            self.data.update(data)

    def get(self, key):
        # Wrap the synchronous lookup in an already-fired Deferred,
        # matching the asynchronous interface do_get expects.
        return defer.succeed(self.data.get(key))
| gpl-2.0 |
jolene-esposito/osf.io | website/addons/base/testing.py | 17 | 3174 | # -*- coding: utf-8 -*-
from framework.auth import Auth
from tests.base import OsfTestCase
from tests.factories import AuthUserFactory, ProjectFactory
class AddonTestCase(OsfTestCase):
    """General Addon TestCase that automatically sets up a user and node with
    an addon.

    Must define:

        - ADDON_SHORT_NAME (class variable)
        - set_user_settings(self, settings): Method that makes any modifications
          to the UserSettings object, e.g. setting access_token
        - set_node_settings(self, settings): Method that makes any modifications
          to the NodeSettings object.

    This will give you:

        - self.user: A User with the addon enabled
        - self.project: A project created by self.user and has the addon enabled
        - self.user_settings: AddonUserSettings object for the addon
        - self.node_settings: AddonNodeSettings object for the addon

    """
    ADDON_SHORT_NAME = None
    # Which settings objects to create; subclasses may trim this list.
    OWNERS = ['user', 'node']
    # Attribute on the node settings that links back to the user settings.
    NODE_USER_FIELD = 'user_settings'

    # Optional overrides
    def create_user(self):
        return AuthUserFactory.build()

    def create_project(self):
        return ProjectFactory(creator=self.user)

    def set_user_settings(self, settings):
        """Make any necessary modifications to the user settings object,
        e.g. setting access_token.
        """
        raise NotImplementedError('Must define set_user_settings(self, settings) method')

    def set_node_settings(self, settings):
        raise NotImplementedError('Must define set_node_settings(self, settings) method')

    def create_user_settings(self):
        """Initialize user settings object if requested by `self.OWNERS`.
        """
        if 'user' not in self.OWNERS:
            return
        self.user.add_addon(self.ADDON_SHORT_NAME, override=True)
        assert self.user.has_addon(self.ADDON_SHORT_NAME), '{0} is not enabled'.format(self.ADDON_SHORT_NAME)
        self.user_settings = self.user.get_addon(self.ADDON_SHORT_NAME)
        self.set_user_settings(self.user_settings)
        self.user_settings.save()

    def create_node_settings(self):
        """Initialize node settings object if requested by `self.OWNERS`,
        additionally linking to user settings if requested by
        `self.NODE_USER_FIELD`.
        """
        if 'node' not in self.OWNERS:
            return
        self.project.add_addon(self.ADDON_SHORT_NAME, auth=Auth(self.user))
        self.node_settings = self.project.get_addon(self.ADDON_SHORT_NAME)
        # User has imported their addon settings to this node
        if self.NODE_USER_FIELD:
            setattr(self.node_settings, self.NODE_USER_FIELD, self.user_settings)
        self.set_node_settings(self.node_settings)
        self.node_settings.save()

    def setUp(self):
        super(AddonTestCase, self).setUp()

        self.user = self.create_user()
        if not self.ADDON_SHORT_NAME:
            raise ValueError('Must define ADDON_SHORT_NAME in the test class.')
        self.user.save()
        self.project = self.create_project()
        self.project.save()

        # Order matters: node settings may link to the user settings.
        self.create_user_settings()
        self.create_node_settings()
| apache-2.0 |
mvaled/gunicorn | gunicorn/util.py | 10 | 15688 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
from __future__ import print_function
import email.utils
import fcntl
import io
import os
import pkg_resources
import random
import resource
import socket
import sys
import textwrap
import time
import traceback
import inspect
import errno
import warnings
import cgi
from gunicorn.errors import AppImportError
from gunicorn.six import text_type
from gunicorn.workers import SUPPORTED_WORKERS
MAXFD = 1024
REDIRECT_TO = getattr(os, 'devnull', '/dev/null')
timeout_default = object()
CHUNK_SIZE = (16 * 1024)
MAX_BODY = 1024 * 132
# Server and Date aren't technically hop-by-hop
# headers, but they are in the purview of the
# origin server which the WSGI spec says we should
# act like. So we drop them and add our own.
#
# In the future, concatenation server header values
# might be better, but nothing else does it and
# dropping them is easier.
hop_headers = set("""
connection keep-alive proxy-authenticate proxy-authorization
te trailers transfer-encoding upgrade
server date
""".split())
try:
    from setproctitle import setproctitle

    def _setproctitle(title):
        # Shown in ps/top as "gunicorn: <title>".
        setproctitle("gunicorn: %s" % title)
except ImportError:
    def _setproctitle(title):
        # setproctitle is not installed: process titles become a no-op.
        return
try:
    from importlib import import_module
except ImportError:
    # Python < 2.7: minimal backport of importlib.import_module.

    def _resolve_name(name, package, level):
        """Return the absolute name of the module to be imported."""
        if not hasattr(package, 'rindex'):
            raise ValueError("'package' not set to a string")
        dot = len(package)
        # Walk up one package level per leading dot beyond the first.
        for x in range(level, 1, -1):
            try:
                dot = package.rindex('.', 0, dot)
            except ValueError:
                msg = "attempted relative import beyond top-level package"
                raise ValueError(msg)
        return "%s.%s" % (package[:dot], name)

    def import_module(name, package=None):
        """Import a module.

        The 'package' argument is required when performing a relative import. It
        specifies the package to use as the anchor point from which to resolve the
        relative import to an absolute import.
        """
        if name.startswith('.'):
            if not package:
                raise TypeError("relative imports require the 'package' argument")
            level = 0
            # Count the leading dots to determine the relative depth.
            for character in name:
                if character != '.':
                    break
                level += 1
            name = _resolve_name(name[level:], package, level)
        __import__(name)
        return sys.modules[name]
def load_class(uri, default="gunicorn.workers.sync.SyncWorker",
               section="gunicorn.workers"):
    """Resolve *uri* to a worker class.

    Accepts an actual class, an ``egg:dist#name`` entry-point spec, a
    bare worker alias (looked up in SUPPORTED_WORKERS or gunicorn's own
    entry points), or a dotted ``module.Class`` path.  Raises
    RuntimeError embedding the captured traceback when the target
    cannot be imported.
    """
    if inspect.isclass(uri):
        return uri
    if uri.startswith("egg:"):
        # uses entry points
        entry_str = uri.split("egg:")[1]
        try:
            dist, name = entry_str.rsplit("#", 1)
        except ValueError:
            dist = entry_str
            name = default
        try:
            return pkg_resources.load_entry_point(dist, section, name)
        except:
            exc = traceback.format_exc()
            msg = "class uri %r invalid or not found: \n\n[%s]"
            raise RuntimeError(msg % (uri, exc))
    else:
        components = uri.split('.')
        if len(components) == 1:
            # Bare name: try the built-in worker aliases first, then
            # gunicorn's own entry points.
            while True:
                if uri.startswith("#"):
                    uri = uri[1:]
                if uri in SUPPORTED_WORKERS:
                    components = SUPPORTED_WORKERS[uri].split(".")
                    break
                try:
                    return pkg_resources.load_entry_point("gunicorn",
                                                          section, uri)
                except:
                    exc = traceback.format_exc()
                    msg = "class uri %r invalid or not found: \n\n[%s]"
                    raise RuntimeError(msg % (uri, exc))
        klass = components.pop(-1)
        try:
            mod = import_module('.'.join(components))
        except:
            exc = traceback.format_exc()
            msg = "class uri %r invalid or not found: \n\n[%s]"
            raise RuntimeError(msg % (uri, exc))
        return getattr(mod, klass)
def set_owner_process(uid, gid):
    """ set user and group of workers processes """
    # Drop the group first: after setuid() we may no longer have the
    # privilege to change groups.
    if gid:
        # versions of python < 2.6.2 don't manage unsigned int for
        # groups like on osx or fedora
        gid = abs(gid) & 0x7FFFFFFF
        os.setgid(gid)
    if uid:
        os.setuid(uid)
def chown(path, uid, gid):
    """Change ownership of *path*, masking gid to a positive 31-bit value."""
    gid = abs(gid) & 0x7FFFFFFF  # see note above.
    os.chown(path, uid, gid)
if sys.platform.startswith("win"):
    def _waitfor(func, pathname, waitall=False):
        # Perform the operation
        func(pathname)

        # Now setup the wait loop
        if waitall:
            dirname = pathname
        else:
            dirname, name = os.path.split(pathname)
            dirname = dirname or '.'

        # Check for `pathname` to be removed from the filesystem.
        # The exponential backoff of the timeout amounts to a total
        # of ~1 second after which the deletion is probably an error
        # anyway.
        # Testing on a i7@4.3GHz shows that usually only 1 iteration is
        # required when contention occurs.
        timeout = 0.001
        while timeout < 1.0:
            # Note we are only testing for the existence of the file(s) in
            # the contents of the directory regardless of any security or
            # access rights. If we have made it this far, we have sufficient
            # permissions to do that much using Python's equivalent of the
            # Windows API FindFirstFile.
            # Other Windows APIs can fail or give incorrect results when
            # dealing with files that are pending deletion.
            L = os.listdir(dirname)
            if not (L if waitall else name in L):
                return
            # Increase the timeout and try again
            time.sleep(timeout)
            timeout *= 2

        warnings.warn('tests may fail, delete still pending for ' + pathname,
                      RuntimeWarning, stacklevel=4)

    def _unlink(filename):
        _waitfor(os.unlink, filename)
else:
    # POSIX unlink is synchronous; no wait loop needed.
    _unlink = os.unlink
def unlink(filename):
    """Remove *filename*; a missing file or directory is not an error."""
    try:
        _unlink(filename)
    except OSError as error:
        # The filename need not exist.
        if error.errno not in (errno.ENOENT, errno.ENOTDIR):
            raise
def is_ipv6(addr):
    """Return True when *addr* parses as a literal IPv6 address."""
    try:
        socket.inet_pton(socket.AF_INET6, addr)
        return True
    except (socket.error, ValueError):
        # socket.error: not a valid address
        # ValueError: IPv6 not supported on this platform
        return False
def parse_address(netloc, default_port=8000):
    """Parse a bind string into a UNIX socket path or a (host, port) pair.

    ``unix://PATH`` and ``unix:PATH`` return the bare path.  Anything
    else (optionally prefixed with ``tcp://``) yields a lowercased host
    and an integer port, falling back to *default_port* when no port is
    given.  Raises RuntimeError for a non-numeric port.
    """
    if netloc.startswith("unix://"):
        return netloc.split("unix://")[1]

    if netloc.startswith("unix:"):
        return netloc.split("unix:")[1]

    if netloc.startswith("tcp://"):
        netloc = netloc.split("tcp://")[1]

    # Host part: bracketed IPv6, host:port, empty (wildcard) or bare host.
    if '[' in netloc and ']' in netloc:
        host = netloc.split(']')[0][1:].lower()
    elif ':' in netloc:
        host = netloc.split(':')[0].lower()
    elif netloc == "":
        host = "0.0.0.0"
    else:
        host = netloc.lower()

    # Port part: whatever follows the closing bracket (or the whole string).
    tail = netloc.split(']')[-1]
    if ":" not in tail:
        return (host, default_port)

    port = tail.split(':', 1)[1]
    if not port.isdigit():
        raise RuntimeError("%r is not a valid port number." % port)
    return (host, int(port))
def get_maxfd():
    """Return the hard limit on open file descriptors.

    Falls back to the module-level MAXFD constant when the OS reports
    the limit as unlimited.
    """
    _soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    if hard == resource.RLIM_INFINITY:
        return MAXFD
    return hard
def close_on_exec(fd):
    """Mark *fd* so it is closed automatically across an exec()."""
    current = fcntl.fcntl(fd, fcntl.F_GETFD)
    fcntl.fcntl(fd, fcntl.F_SETFD, current | fcntl.FD_CLOEXEC)
def set_non_blocking(fd):
    """Switch *fd* into non-blocking I/O mode."""
    current = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, current | os.O_NONBLOCK)
def close(sock):
    """Close *sock*, silently ignoring socket-level errors."""
    try:
        sock.close()
    except socket.error:
        pass
try:
    from os import closerange
except ImportError:
    # Python < 2.6: emulate os.closerange.
    def closerange(fd_low, fd_high):
        # Iterate through and close all file descriptors.
        for fd in range(fd_low, fd_high):
            try:
                os.close(fd)
            except OSError:  # ERROR, fd wasn't open to begin with (ignored)
                pass
def write_chunk(sock, data):
    """Send *data* over *sock* framed as a single HTTP/1.1 chunk.

    Text input is UTF-8 encoded first; the chunk is the hex size line,
    the payload, and a trailing CRLF.
    """
    if isinstance(data, text_type):
        data = data.encode('utf-8')
    chunk_size = "%X\r\n" % len(data)
    chunk = b"".join([chunk_size.encode('utf-8'), data, b"\r\n"])
    sock.sendall(chunk)
def write(sock, data, chunked=False):
    """Send *data* on *sock*, using chunked framing when *chunked* is true."""
    if chunked:
        return write_chunk(sock, data)
    sock.sendall(data)
def write_nonblock(sock, data, chunked=False):
    """Write *data* without blocking.

    A socket that is currently blocking (non-zero timeout) is flipped
    into non-blocking mode for the write and restored afterwards.
    """
    timeout = sock.gettimeout()
    if timeout != 0.0:
        try:
            sock.setblocking(0)
            return write(sock, data, chunked)
        finally:
            # Always restore blocking mode, even if the write failed.
            sock.setblocking(1)
    else:
        return write(sock, data, chunked)
def writelines(sock, lines, chunked=False):
    """Send each item of *lines* over *sock* via ``write``.

    Args:
        sock: destination socket.
        lines: iterable of payloads; consumed exactly once.
        chunked: forwarded to ``write`` to emit HTTP chunked framing.
    """
    # Iterate directly: the previous ``list(lines)`` copy bought nothing
    # and needlessly materialized the whole payload in memory.
    for line in lines:
        write(sock, line, chunked)
def write_error(sock, status_int, reason, mesg):
    """Emit a complete minimal HTML error response on *sock*.

    *mesg* is HTML-escaped before being embedded in the body; the whole
    response is sent non-blocking, latin-1 encoded.
    """
    html = textwrap.dedent("""\
    <html>
      <head>
        <title>%(reason)s</title>
      </head>
      <body>
        <h1><p>%(reason)s</p></h1>
        %(mesg)s
      </body>
    </html>
    """) % {"reason": reason, "mesg": cgi.escape(mesg)}

    http = textwrap.dedent("""\
    HTTP/1.1 %s %s\r
    Connection: close\r
    Content-Type: text/html\r
    Content-Length: %d\r
    \r
    %s""") % (str(status_int), reason, len(html), html)
    write_nonblock(sock, http.encode('latin1'))
def normalize_name(name):
    """Canonicalize an HTTP header name.

    Example: ``x-forwarded-for`` -> ``X-Forwarded-For``.
    """
    pieces = [piece.lower().capitalize() for piece in name.split("-")]
    return "-".join(pieces)
def import_app(module):
    """Import a WSGI application from ``"module[:callable]"`` notation.

    The callable part defaults to ``application``.  Raises
    AppImportError when the object is missing, None, or not callable.
    """
    parts = module.split(":", 1)
    if len(parts) == 1:
        module, obj = module, "application"
    else:
        module, obj = parts[0], parts[1]

    try:
        __import__(module)
    except ImportError:
        # Friendlier message when the user passed a file path instead of
        # a dotted module name.
        if module.endswith(".py") and os.path.exists(module):
            msg = "Failed to find application, did you mean '%s:%s'?"
            raise ImportError(msg % (module.rsplit(".", 1)[0], obj))
        else:
            raise

    mod = sys.modules[module]

    try:
        # ``obj`` may be an expression (e.g. "create_app()"), hence eval.
        app = eval(obj, mod.__dict__)
    except NameError:
        raise AppImportError("Failed to find application: %r" % module)

    if app is None:
        raise AppImportError("Failed to find application object: %r" % obj)

    if not callable(app):
        raise AppImportError("Application object must be callable.")
    return app
def getcwd():
    """Return the current working directory.

    Prefer the ``PWD`` environment variable when it refers to the same
    directory as ``os.getcwd()`` (same inode and device); this preserves
    the logical, possibly symlinked, path the user is actually in.
    """
    try:
        a = os.stat(os.environ['PWD'])
        b = os.stat(os.getcwd())
        if a.st_ino == b.st_ino and a.st_dev == b.st_dev:
            cwd = os.environ['PWD']
        else:
            cwd = os.getcwd()
    except (KeyError, OSError):
        # PWD unset, or stat() failed on a stale path: fall back to the
        # kernel's idea of the cwd.  (Was a bare ``except:``, which also
        # swallowed KeyboardInterrupt/SystemExit.)
        cwd = os.getcwd()
    return cwd
def http_date(timestamp=None):
    """Format *timestamp* (seconds since the epoch; default: now) as an
    HTTP date header value, e.g. ``Thu, 01 Jan 1970 00:00:00 GMT``.
    """
    when = time.time() if timestamp is None else timestamp
    return email.utils.formatdate(when, localtime=False, usegmt=True)
def is_hoppish(header):
    """True when *header* names a hop-by-hop header we must not pass through."""
    return header.lower().strip() in hop_headers
def daemonize(enable_stdio_inheritance=False):
    """\
    Standard daemonization of a process.
    http://www.svbug.com/documentation/comp.unix.programmer-FAQ/faq_2.html#SEC16
    """
    if 'GUNICORN_FD' not in os.environ:
        # Double-fork with an intervening setsid() so the daemon is
        # re-parented to init and can never reacquire a controlling tty.
        if os.fork():
            os._exit(0)
        os.setsid()

        if os.fork():
            os._exit(0)

        os.umask(0o22)

        # In both the following any file descriptors above stdin
        # stdout and stderr are left untouched. The inheritence
        # option simply allows one to have output go to a file
        # specified by way of shell redirection when not wanting
        # to use --error-log option.

        if not enable_stdio_inheritance:
            # Remap all of stdin, stdout and stderr on to
            # /dev/null. The expectation is that users have
            # specified the --error-log option.

            closerange(0, 3)

            fd_null = os.open(REDIRECT_TO, os.O_RDWR)

            if fd_null != 0:
                os.dup2(fd_null, 0)

            os.dup2(fd_null, 1)
            os.dup2(fd_null, 2)
        else:
            fd_null = os.open(REDIRECT_TO, os.O_RDWR)

            # Always redirect stdin to /dev/null as we would
            # never expect to need to read interactive input.

            if fd_null != 0:
                os.close(0)
                os.dup2(fd_null, 0)

            # If stdout and stderr are still connected to
            # their original file descriptors we check to see
            # if they are associated with terminal devices.
            # When they are we map them to /dev/null so that
            # are still detached from any controlling terminal
            # properly. If not we preserve them as they are.
            #
            # If stdin and stdout were not hooked up to the
            # original file descriptors, then all bets are
            # off and all we can really do is leave them as
            # they were.
            #
            # This will allow 'gunicorn ... > output.log 2>&1'
            # to work with stdout/stderr going to the file
            # as expected.
            #
            # Note that if using --error-log option, the log
            # file specified through shell redirection will
            # only be used up until the log file specified
            # by the option takes over. As it replaces stdout
            # and stderr at the file descriptor level, then
            # anything using stdout or stderr, including having
            # cached a reference to them, will still work.

            def redirect(stream, fd_expect):
                try:
                    fd = stream.fileno()
                    if fd == fd_expect and stream.isatty():
                        os.close(fd)
                        os.dup2(fd_null, fd)
                except AttributeError:
                    pass

            redirect(sys.stdout, 1)
            redirect(sys.stderr, 2)
def seed():
    """Re-seed the PRNG from the OS entropy pool, with a time/pid fallback."""
    try:
        entropy = os.urandom(64)
    except NotImplementedError:
        # Platform without urandom(): fall back to a weak but usable seed.
        random.seed('%s.%s' % (time.time(), os.getpid()))
    else:
        random.seed(entropy)
def check_is_writeable(path):
    """Raise RuntimeError unless *path* can be opened for appending.

    Note: opening with mode ``'a'`` creates the file when it is absent.
    """
    try:
        handle = open(path, 'a')
    except IOError as exc:
        raise RuntimeError("Error: '%s' isn't writable [%r]" % (path, exc))
    else:
        handle.close()
def to_bytestring(value):
    """Converts a string argument to a byte string.

    Bytes pass through unchanged; text is UTF-8 encoded; anything else
    raises TypeError.
    """
    if isinstance(value, bytes):
        return value

    if not isinstance(value, text_type):
        raise TypeError('%r is not a string' % value)

    return value.encode("utf-8")
def is_fileobject(obj):
    """Return True only for objects backed by a real OS file descriptor."""
    has_file_api = hasattr(obj, "tell") and hasattr(obj, "fileno")
    if not has_file_api:
        return False

    # BytesIO/StringIO expose fileno() but raise when it is called.
    try:
        obj.fileno()
    except (IOError, io.UnsupportedOperation):
        return False
    return True
def warn(msg):
    """Print *msg* to stderr framed by ``!!!`` markers.

    The first line of the message gets a ``WARNING:`` prefix.
    """
    err = sys.stderr
    print("!!!", file=err)

    for index, text in enumerate(msg.splitlines()):
        prefix = "WARNING: " if index == 0 else ""
        print("!!! %s%s" % (prefix, text), file=err)

    print("!!!\n", file=err)
    err.flush()
def make_fail_app(msg):
    """Build a minimal WSGI application that always answers 500 with *msg*."""
    def app(environ, start_response):
        # Fixed headers: plain text body whose length is that of *msg*.
        response_headers = [
            ("Content-Type", "text/plain"),
            ("Content-Length", str(len(msg)))
        ]
        start_response("500 Internal Server Error", response_headers)
        return [msg]
    return app
| mit |
briancurtin/libcloud | libcloud/compute/drivers/nephoscale.py | 32 | 17222 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
NephoScale Cloud driver (http://www.nephoscale.com)
API documentation: http://docs.nephoscale.com
Created by Markos Gogoulos (https://mist.io)
"""
import base64
import sys
import time
import os
import binascii
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import b
from libcloud.utils.py3 import urlencode
from libcloud.compute.providers import Provider
from libcloud.common.base import JsonResponse, ConnectionUserAndKey
from libcloud.compute.types import (NodeState, InvalidCredsError,
LibcloudError)
from libcloud.compute.base import (Node, NodeDriver, NodeImage, NodeSize,
NodeLocation)
from libcloud.utils.networking import is_private_subnet
# Hostname of the NephoScale REST API endpoint.
API_HOST = 'api.nephoscale.com'
# Maps the API's 'power_status' strings onto libcloud NodeState values.
NODE_STATE_MAP = {
    'on': NodeState.RUNNING,
    'off': NodeState.UNKNOWN,
    'unknown': NodeState.UNKNOWN,
}
# HTTP statuses treated as success by the driver's boolean-returning calls.
VALID_RESPONSE_CODES = [httplib.OK, httplib.ACCEPTED, httplib.CREATED,
                        httplib.NO_CONTENT]
# used in create_node and specifies how many times to get the list of nodes and
# check if the newly created node is there. This is because when a request is
# sent to create a node, NephoScale replies with the job id, and not the node
# itself thus we don't have the ip addresses, that are required in deploy_node
CONNECT_ATTEMPTS = 10
class NodeKey(object):
    """A NephoScale key: either an SSH public key or a password key."""

    def __init__(self, id, name, public_key=None, key_group=None,
                 password=None):
        self.id = id
        self.name = name
        self.public_key = public_key
        self.key_group = key_group
        self.password = password

    def __repr__(self):
        return '<NodeKey: id=%s, name=%s>' % (self.id, self.name)
class NephoscaleResponse(JsonResponse):
    """
    Response parser for the NephoScale API.
    """

    def parse_error(self):
        """Translate HTTP error statuses into exceptions."""
        status = self.status
        if status == httplib.UNAUTHORIZED:
            raise InvalidCredsError('Authorization Failed')
        if status == httplib.NOT_FOUND:
            raise Exception("The resource you are looking for is not found.")
        return self.body

    def success(self):
        """A request succeeded when its status is one of the accepted codes."""
        return self.status in VALID_RESPONSE_CODES
class NephoscaleConnection(ConnectionUserAndKey):
    """
    Connection to the NephoScale API.

    Authenticates with HTTP Basic Authentication using the
    username/password pair.
    """
    host = API_HOST
    responseCls = NephoscaleResponse
    allow_insecure = False

    def add_default_headers(self, headers):
        """Attach the Basic-Auth header required by every request."""
        credentials = b('%s:%s' % (self.user_id, self.key))
        encoded = base64.b64encode(credentials).decode('utf-8')
        headers['Authorization'] = 'Basic %s' % encoded
        return headers
class NephoscaleNodeDriver(NodeDriver):
    """
    Nephoscale node driver class.
    >>> from libcloud.compute.providers import get_driver
    >>> driver = get_driver('nephoscale')
    >>> conn = driver('nepho_user','nepho_password')
    >>> conn.list_nodes()
    """
    type = Provider.NEPHOSCALE  # libcloud provider constant for this driver
    api_name = 'nephoscale'  # presumably the pricing-data key used by _get_size_price (base class) -- confirm
    name = 'NephoScale'  # human-readable provider name
    website = 'http://www.nephoscale.com'
    connectionCls = NephoscaleConnection  # connection class used for all requests
    features = {'create_node': ['ssh_key']}  # deploy_node support: ssh-key auth
def list_locations(self):
"""
List available zones for deployment
:rtype: ``list`` of :class:`NodeLocation`
"""
result = self.connection.request('/datacenter/zone/').object
locations = []
for value in result.get('data', []):
location = NodeLocation(id=value.get('id'),
name=value.get('name'),
country='US',
driver=self)
locations.append(location)
return locations
def list_images(self):
"""
List available images for deployment
:rtype: ``list`` of :class:`NodeImage`
"""
result = self.connection.request('/image/server/').object
images = []
for value in result.get('data', []):
extra = {'architecture': value.get('architecture'),
'disks': value.get('disks'),
'billable_type': value.get('billable_type'),
'pcpus': value.get('pcpus'),
'cores': value.get('cores'),
'uri': value.get('uri'),
'storage': value.get('storage'),
}
image = NodeImage(id=value.get('id'),
name=value.get('friendly_name'),
driver=self,
extra=extra)
images.append(image)
return images
def list_sizes(self):
"""
List available sizes containing prices
:rtype: ``list`` of :class:`NodeSize`
"""
result = self.connection.request('/server/type/cloud/').object
sizes = []
for value in result.get('data', []):
value_id = value.get('id')
size = NodeSize(id=value_id,
name=value.get('friendly_name'),
ram=value.get('ram'),
disk=value.get('storage'),
bandwidth=None,
price=self._get_size_price(size_id=str(value_id)),
driver=self)
sizes.append(size)
return sorted(sizes, key=lambda k: k.price)
def list_nodes(self):
"""
List available nodes
:rtype: ``list`` of :class:`Node`
"""
result = self.connection.request('/server/cloud/').object
nodes = [self._to_node(value) for value in result.get('data', [])]
return nodes
def rename_node(self, node, name, hostname=None):
"""rename a cloud server, optionally specify hostname too"""
data = {'name': name}
if hostname:
data['hostname'] = hostname
params = urlencode(data)
result = self.connection.request('/server/cloud/%s/' % node.id,
data=params, method='PUT').object
return result.get('response') in VALID_RESPONSE_CODES
def reboot_node(self, node):
"""reboot a running node"""
result = self.connection.request('/server/cloud/%s/initiator/restart/'
% node.id, method='POST').object
return result.get('response') in VALID_RESPONSE_CODES
def ex_start_node(self, node):
"""start a stopped node"""
result = self.connection.request('/server/cloud/%s/initiator/start/'
% node.id, method='POST').object
return result.get('response') in VALID_RESPONSE_CODES
def ex_stop_node(self, node):
"""stop a running node"""
result = self.connection.request('/server/cloud/%s/initiator/stop/'
% node.id, method='POST').object
return result.get('response') in VALID_RESPONSE_CODES
def destroy_node(self, node):
"""destroy a node"""
result = self.connection.request('/server/cloud/%s/' % node.id,
method='DELETE').object
return result.get('response') in VALID_RESPONSE_CODES
def ex_list_keypairs(self, ssh=False, password=False, key_group=None):
"""
List available console and server keys
There are two types of keys for NephoScale, ssh and password keys.
If run without arguments, lists all keys. Otherwise list only
ssh keys, or only password keys.
Password keys with key_group 4 are console keys. When a server
is created, it has two keys, one password or ssh key, and
one password console key.
:keyword ssh: if specified, show ssh keys only (optional)
:type ssh: ``bool``
:keyword password: if specified, show password keys only (optional)
:type password: ``bool``
:keyword key_group: if specified, show keys with this key_group only
eg key_group=4 for console password keys (optional)
:type key_group: ``int``
:rtype: ``list`` of :class:`NodeKey`
"""
if (ssh and password):
raise LibcloudError('You can only supply ssh or password. To \
get all keys call with no arguments')
if ssh:
result = self.connection.request('/key/sshrsa/').object
elif password:
result = self.connection.request('/key/password/').object
else:
result = self.connection.request('/key/').object
keys = [self._to_key(value) for value in result.get('data', [])]
if key_group:
keys = [key for key in keys if
key.key_group == key_group]
return keys
def ex_create_keypair(self, name, public_key=None, password=None,
key_group=None):
"""Creates a key, ssh or password, for server or console
The group for the key (key_group) is 1 for Server and 4 for Console
Returns the id of the created key
"""
if public_key:
if not key_group:
key_group = 1
data = {
'name': name,
'public_key': public_key,
'key_group': key_group
}
params = urlencode(data)
result = self.connection.request('/key/sshrsa/', data=params,
method='POST').object
else:
if not key_group:
key_group = 4
if not password:
password = self.random_password()
data = {
'name': name,
'password': password,
'key_group': key_group
}
params = urlencode(data)
result = self.connection.request('/key/password/', data=params,
method='POST').object
return result.get('data', {}).get('id', '')
def ex_delete_keypair(self, key_id, ssh=False):
"""Delete an ssh key or password given it's id
"""
if ssh:
result = self.connection.request('/key/sshrsa/%s/' % key_id,
method='DELETE').object
else:
result = self.connection.request('/key/password/%s/' % key_id,
method='DELETE').object
return result.get('response') in VALID_RESPONSE_CODES
    def create_node(self, name, size, image, server_key=None,
                    console_key=None, zone=None, **kwargs):
        """Creates the node, and sets the ssh key, console key
        NephoScale will respond with a 200-200 response after sending a valid
        request. If nowait=True is specified in the args, we then ask a few
        times until the server is created and assigned a public IP address,
        so that deploy_node can be run
        >>> from libcloud.compute.providers import get_driver
        >>> driver = get_driver('nephoscale')
        >>> conn = driver('nepho_user','nepho_password')
        >>> conn.list_nodes()
        >>> name = 'staging-server'
        >>> size = conn.list_sizes()[0]
        <NodeSize: id=27, ...name=CS025 - 0.25GB, 10GB, ...>
        >>> image = conn.list_images()[9]
        <NodeImage: id=49, name=Linux Ubuntu Server 10.04 LTS 64-bit, ...>
        >>> server_keys = conn.ex_list_keypairs(key_group=1)[0]
        <NodeKey: id=71211, name=markos>
        >>> server_key = conn.ex_list_keypairs(key_group=1)[0].id
        70867
        >>> console_keys = conn.ex_list_keypairs(key_group=4)[0]
        <NodeKey: id=71213, name=mistio28434>
        >>> console_key = conn.ex_list_keypairs(key_group=4)[0].id
        70907
        >>> node = conn.create_node(name=name, size=size, image=image, \
        console_key=console_key, server_key=server_key)
        We can also create an ssh key, plus a console key and
        deploy node with them
        >>> server_key = conn.ex_create_keypair(name, public_key='123')
        71211
        >>> console_key = conn.ex_create_keypair(name, key_group=4)
        71213
        We can increase the number of connect attempts to wait until
        the node is created, so that deploy_node has ip address to
        deploy the script
        We can also specify the location
        >>> location = conn.list_locations()[0]
        >>> node = conn.create_node(name=name,
        ...                         size=size,
        ...                         image=image,
        ...                         console_key=console_key,
        ...                         server_key=server_key,
        ...                         connect_attempts=10,
        ...                         nowait=True,
        ...                         zone=location.id)
        """
        hostname = kwargs.get('hostname', name)
        service_type = size.id
        image = image.id
        connect_attempts = int(kwargs.get('connect_attempts',
                               CONNECT_ATTEMPTS))
        data = {'name': name,
                'hostname': hostname,
                'service_type': service_type,
                'image': image,
                'server_key': server_key,
                'console_key': console_key,
                'zone': zone
                }
        params = urlencode(data)
        try:
            node = self.connection.request('/server/cloud/', data=params,
                                           method='POST')
        except Exception:
            e = sys.exc_info()[1]
            raise Exception("Failed to create node %s" % e)
        # The create call returns a job id rather than the server itself (see
        # the CONNECT_ATTEMPTS comment at module level), so build a
        # placeholder Node with no id and no IP addresses.
        node = Node(id='', name=name, state=NodeState.UNKNOWN, public_ips=[],
                    private_ips=[], driver=self)
        # NOTE(review): despite the variable name, the polling behaviour is
        # switched on by the 'ex_wait' kwarg, while the docstring above talks
        # about nowait=True -- confirm which spelling callers actually use.
        nowait = kwargs.get('ex_wait', False)
        if not nowait:
            return node
        else:
            # try to get the created node public ips, for use in deploy_node
            # At this point we don't have the id of the newly created Node,
            # so search name in nodes
            created_node = False
            while connect_attempts > 0:
                nodes = self.list_nodes()
                created_node = [c_node for c_node in nodes if
                                c_node.name == name]
                if created_node:
                    return created_node[0]
                else:
                    # Poll once a minute until the server shows up or the
                    # attempt budget is exhausted.
                    time.sleep(60)
                    connect_attempts = connect_attempts - 1
            return node
def _to_node(self, data):
"""Convert node in Node instances
"""
state = NODE_STATE_MAP.get(data.get('power_status'), '4')
public_ips = []
private_ips = []
ip_addresses = data.get('ipaddresses', '')
# E.g. "ipaddresses": "198.120.14.6, 10.132.60.1"
if ip_addresses:
for ip in ip_addresses.split(','):
ip = ip.replace(' ', '')
if is_private_subnet(ip):
private_ips.append(ip)
else:
public_ips.append(ip)
extra = {
'zone_data': data.get('zone'),
'zone': data.get('zone', {}).get('name'),
'image': data.get('image', {}).get('friendly_name'),
'create_time': data.get('create_time'),
'network_ports': data.get('network_ports'),
'is_console_enabled': data.get('is_console_enabled'),
'service_type': data.get('service_type', {}).get('friendly_name'),
'hostname': data.get('hostname')
}
node = Node(id=data.get('id'), name=data.get('name'), state=state,
public_ips=public_ips, private_ips=private_ips,
driver=self, extra=extra)
return node
def _to_key(self, data):
return NodeKey(id=data.get('id'),
name=data.get('name'),
password=data.get('password'),
key_group=data.get('key_group'),
public_key=data.get('public_key'))
def random_password(self, size=8):
value = os.urandom(size)
password = binascii.hexlify(value).decode('ascii')
return password[:size]
| apache-2.0 |
svanschalkwyk/datafari | debian7/elk/kibana/node/lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/__init__.py | 289 | 21425 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import gyp.input
import optparse
import os.path
import re
import shlex
import sys
import traceback
from gyp.common import GypError
# Default debug modes for GYP
debug = {}
# gyp_main fills `debug` with {mode_name: 1} entries from the -d/--debug
# command-line flag; DebugOutput checks membership here before printing.
# List of "official" debug modes, but you can use anything you like.
DEBUG_GENERAL = 'general'
DEBUG_VARIABLES = 'variables'
DEBUG_INCLUDES = 'includes'
def DebugOutput(mode, message, *args):
  """Print *message* (optionally %-formatted with *args*) when *mode* --
  or 'all' -- is enabled in gyp.debug, prefixed with the caller's location."""
  if 'all' in gyp.debug or mode in gyp.debug:
    # Best effort at recovering the caller's file/line/function for the
    # prefix; fall back to the 'unknown' placeholders on any failure.
    ctx = ('unknown', 0, 'unknown')
    try:
      f = traceback.extract_stack(limit=2)
      if f:
        ctx = f[0][:3]
    except:
      pass
    if args:
      message %= args
    print '%s:%s:%d:%s %s' % (mode.upper(), os.path.basename(ctx[0]),
                              ctx[1], ctx[2], message)
def FindBuildFiles():
  """Return the names of the .gyp files in the current working directory."""
  extension = '.gyp'
  return [name for name in os.listdir(os.getcwd())
          if name.endswith(extension)]
def Load(build_files, format, default_variables={},
         includes=[], depth='.', params=None, check=False,
         circular_check=True):
  """
  Loads one or more specified build files.
  default_variables and includes will be copied before use.
  Returns the generator for the specified format and the
  data returned by loading the specified build files.

  format may carry a flavor suffix ('format-flavor'); the flavor part is
  split off into params['flavor'].  check and circular_check are forwarded
  to gyp.input.Load.
  """
  if params is None:
    params = {}
  if '-' in format:
    format, params['flavor'] = format.split('-', 1)
  default_variables = copy.copy(default_variables)
  # Default variables provided by this program and its modules should be
  # named WITH_CAPITAL_LETTERS to provide a distinct "best practice" namespace,
  # avoiding collisions with user and automatic variables.
  default_variables['GENERATOR'] = format
  default_variables['GENERATOR_FLAVOR'] = params.get('flavor', '')
  # Format can be a custom python file, or by default the name of a module
  # within gyp.generator.
  if format.endswith('.py'):
    generator_name = os.path.splitext(format)[0]
    path, generator_name = os.path.split(generator_name)
    # Make sure the path to the custom generator is in sys.path
    # Don't worry about removing it once we are done. Keeping the path
    # to each generator that is used in sys.path is likely harmless and
    # arguably a good idea.
    path = os.path.abspath(path)
    if path not in sys.path:
      sys.path.insert(0, path)
  else:
    generator_name = 'gyp.generator.' + format
  # These parameters are passed in order (as opposed to by key)
  # because ActivePython cannot handle key parameters to __import__.
  generator = __import__(generator_name, globals(), locals(), generator_name)
  for (key, val) in generator.generator_default_variables.items():
    default_variables.setdefault(key, val)
  # Give the generator the opportunity to set additional variables based on
  # the params it will receive in the output phase.
  if getattr(generator, 'CalculateVariables', None):
    generator.CalculateVariables(default_variables, params)
  # Give the generator the opportunity to set generator_input_info based on
  # the params it will receive in the output phase.
  if getattr(generator, 'CalculateGeneratorInputInfo', None):
    generator.CalculateGeneratorInputInfo(params)
  # Fetch the generator specific info that gets fed to input, we use getattr
  # so we can default things and the generators only have to provide what
  # they need.
  generator_input_info = {
    'non_configuration_keys':
        getattr(generator, 'generator_additional_non_configuration_keys', []),
    'path_sections':
        getattr(generator, 'generator_additional_path_sections', []),
    'extra_sources_for_rules':
        getattr(generator, 'generator_extra_sources_for_rules', []),
    'generator_supports_multiple_toolsets':
        getattr(generator, 'generator_supports_multiple_toolsets', False),
    'generator_wants_static_library_dependencies_adjusted':
        getattr(generator,
                'generator_wants_static_library_dependencies_adjusted', True),
    'generator_wants_sorted_dependencies':
        getattr(generator, 'generator_wants_sorted_dependencies', False),
    'generator_filelist_paths':
        getattr(generator, 'generator_filelist_paths', None),
  }
  # Process the input specific to this generator.
  result = gyp.input.Load(build_files, default_variables, includes[:],
                          depth, generator_input_info, check, circular_check,
                          params['parallel'], params['root_targets'])
  return [generator] + result
def NameValueListToDict(name_value_list):
  """
  Takes an array of strings of the form 'NAME=VALUE' and creates a dictionary
  of the pairs.  A bare NAME maps to True; a VALUE that parses as an integer
  is stored as an int, anything else as the string itself.
  """
  result = {}
  for item in name_value_list:
    name, sep, value = item.partition('=')
    if not sep:
      # No '=' at all: treat the entry as a boolean flag.
      result[name] = True
      continue
    try:
      result[name] = int(value)
    except ValueError:
      result[name] = value
  return result
def ShlexEnv(env_name):
  """Split the named environment variable like a shell command line.

  Returns the unsplit falsy default when the variable is unset or empty.
  """
  flags = os.environ.get(env_name, [])
  return shlex.split(flags) if flags else flags
def FormatOpt(opt, value):
  """Join an option and its value: '--opt=value' for long options,
  '-Ovalue' for short ones."""
  separator = '=' if opt.startswith('--') else ''
  return opt + separator + value
def RegenerateAppendFlag(flag, values, predicate, env_name, options):
  """Regenerate a list of command line flags, for an option of action='append'.

  When |env_name| is given (and the environment is in use) it seeds the list,
  deduplicated so the last occurrence wins; the command-line |values| are then
  appended on top.  This mirrors how environment variables and command-line
  flags interact, without requiring the environment to be set when the flags
  are replayed.
  """
  flags = []
  if options.use_environment and env_name:
    for env_value in ShlexEnv(env_name):
      formatted = FormatOpt(flag, predicate(env_value))
      # Keep only the final occurrence of a repeated value.
      if formatted in flags:
        flags.remove(formatted)
      flags.append(formatted)
  for value in values or []:
    flags.append(FormatOpt(flag, predicate(value)))
  return flags
def RegenerateFlags(options):
  """Given a parsed options object, and taking the environment variables into
  account, returns a list of flags that should regenerate an equivalent options
  object (even in the absence of the environment variables.)
  Any path options will be normalized relative to depth.
  The format flag is not included, as it is assumed the calling generator will
  set that as appropriate.
  """
  # Normalize a path option relative to the --depth directory; an empty
  # result means the current directory.
  def FixPath(path):
    path = gyp.common.FixIfRelativePath(path, options.depth)
    if not path:
      return os.path.curdir
    return path
  # Identity transform for non-path option values.
  def Noop(value):
    return value
  # We always want to ignore the environment when regenerating, to avoid
  # duplicate or changed flags in the environment at the time of regeneration.
  flags = ['--ignore-environment']
  for name, metadata in options._regeneration_metadata.iteritems():
    opt = metadata['opt']
    value = getattr(options, name)
    # Pre-ternary idiom: FixPath for 'path'-typed options, Noop otherwise.
    value_predicate = metadata['type'] == 'path' and FixPath or Noop
    action = metadata['action']
    env_name = metadata['env_name']
    if action == 'append':
      flags.extend(RegenerateAppendFlag(opt, value, value_predicate,
                                        env_name, options))
    elif action in ('store', None): # None is a synonym for 'store'.
      if value:
        flags.append(FormatOpt(opt, value_predicate(value)))
      elif options.use_environment and env_name and os.environ.get(env_name):
        flags.append(FormatOpt(opt, value_predicate(os.environ.get(env_name))))
    elif action in ('store_true', 'store_false'):
      if ((action == 'store_true' and value) or
          (action == 'store_false' and not value)):
        flags.append(opt)
      elif options.use_environment and env_name:
        print >>sys.stderr, ('Warning: environment regeneration unimplemented '
                             'for %s flag %r env_name %r' % (action, opt,
                                                             env_name))
    else:
      print >>sys.stderr, ('Warning: regeneration unimplemented for action %r '
                           'flag %r' % (action, opt))
  return flags
class RegeneratableOptionParser(optparse.OptionParser):
  """OptionParser that records the metadata needed to regenerate its flags."""

  def __init__(self):
    self.__regeneratable_options = {}
    optparse.OptionParser.__init__(self)

  def add_option(self, *args, **kw):
    """Add an option to the parser.

    This accepts the same arguments as OptionParser.add_option, plus the
    following:
    regenerate: can be set to False to prevent this option from being included
                in regeneration.
    env_name: name of environment variable that additional values for this
              option come from.
    type: adds type='path', to tell the regenerator that the values of
          this option need to be made relative to options.depth
    """
    env_name = kw.pop('env_name', None)
    if 'dest' in kw and kw.pop('regenerate', True):
      dest = kw['dest']
      # 'path' is meaningful only to the regenerator; optparse itself just
      # sees a plain string.
      opt_type = kw.get('type')
      if opt_type == 'path':
        kw['type'] = 'string'
      self.__regeneratable_options[dest] = {
          'action': kw.get('action'),
          'type': opt_type,
          'env_name': env_name,
          'opt': args[0],
      }
    optparse.OptionParser.add_option(self, *args, **kw)

  def parse_args(self, *args):
    values, args = optparse.OptionParser.parse_args(self, *args)
    # Expose the recorded metadata so RegenerateFlags can replay the options.
    values._regeneration_metadata = self.__regeneratable_options
    return values, args
def gyp_main(args):
  """Parse the command line, locate build files, then load and run each
  requested generator.  Returns a process exit code (0 on success)."""
  my_name = os.path.basename(sys.argv[0])
  parser = RegeneratableOptionParser()
  usage = 'usage: %s [options ...] [build_file ...]'
  parser.set_usage(usage.replace('%s', '%prog'))
  parser.add_option('--build', dest='configs', action='append',
                    help='configuration for build after project generation')
  parser.add_option('--check', dest='check', action='store_true',
                    help='check format of gyp files')
  parser.add_option('--config-dir', dest='config_dir', action='store',
                    env_name='GYP_CONFIG_DIR', default=None,
                    help='The location for configuration files like '
                         'include.gypi.')
  parser.add_option('-d', '--debug', dest='debug', metavar='DEBUGMODE',
                    action='append', default=[], help='turn on a debugging '
                    'mode for debugging GYP. Supported modes are "variables", '
                    '"includes" and "general" or "all" for all of them.')
  parser.add_option('-D', dest='defines', action='append', metavar='VAR=VAL',
                    env_name='GYP_DEFINES',
                    help='sets variable VAR to value VAL')
  parser.add_option('--depth', dest='depth', metavar='PATH', type='path',
                    help='set DEPTH gyp variable to a relative path to PATH')
  parser.add_option('-f', '--format', dest='formats', action='append',
                    env_name='GYP_GENERATORS', regenerate=False,
                    help='output formats to generate')
  parser.add_option('-G', dest='generator_flags', action='append', default=[],
                    metavar='FLAG=VAL', env_name='GYP_GENERATOR_FLAGS',
                    help='sets generator flag FLAG to VAL')
  parser.add_option('--generator-output', dest='generator_output',
                    action='store', default=None, metavar='DIR', type='path',
                    env_name='GYP_GENERATOR_OUTPUT',
                    help='puts generated build files under DIR')
  parser.add_option('--ignore-environment', dest='use_environment',
                    action='store_false', default=True, regenerate=False,
                    help='do not read options from environment variables')
  parser.add_option('-I', '--include', dest='includes', action='append',
                    metavar='INCLUDE', type='path',
                    help='files to include in all loaded .gyp files')
  # --no-circular-check disables the check for circular relationships between
  # .gyp files. These relationships should not exist, but they've only been
  # observed to be harmful with the Xcode generator. Chromium's .gyp files
  # currently have some circular relationships on non-Mac platforms, so this
  # option allows the strict behavior to be used on Macs and the lenient
  # behavior to be used elsewhere.
  # TODO(mark): Remove this option when http://crbug.com/35878 is fixed.
  parser.add_option('--no-circular-check', dest='circular_check',
                    action='store_false', default=True, regenerate=False,
                    help="don't check for circular relationships between files")
  parser.add_option('--no-parallel', action='store_true', default=False,
                    help='Disable multiprocessing')
  parser.add_option('-S', '--suffix', dest='suffix', default='',
                    help='suffix to add to generated files')
  parser.add_option('--toplevel-dir', dest='toplevel_dir', action='store',
                    default=None, metavar='DIR', type='path',
                    help='directory to use as the root of the source tree')
  parser.add_option('-R', '--root-target', dest='root_targets',
                    action='append', metavar='TARGET',
                    help='include only TARGET and its deep dependencies')
  options, build_files_arg = parser.parse_args(args)
  build_files = build_files_arg
  # Set up the configuration directory (defaults to ~/.gyp)
  if not options.config_dir:
    home = None
    home_dot_gyp = None
    if options.use_environment:
      home_dot_gyp = os.environ.get('GYP_CONFIG_DIR', None)
      if home_dot_gyp:
        home_dot_gyp = os.path.expanduser(home_dot_gyp)
    if not home_dot_gyp:
      home_vars = ['HOME']
      if sys.platform in ('cygwin', 'win32'):
        home_vars.append('USERPROFILE')
      for home_var in home_vars:
        home = os.getenv(home_var)
        if home != None:
          home_dot_gyp = os.path.join(home, '.gyp')
          if not os.path.exists(home_dot_gyp):
            home_dot_gyp = None
          else:
            break
  else:
    home_dot_gyp = os.path.expanduser(options.config_dir)
  if home_dot_gyp and not os.path.exists(home_dot_gyp):
    home_dot_gyp = None
  if not options.formats:
    # If no format was given on the command line, then check the env variable.
    generate_formats = []
    if options.use_environment:
      generate_formats = os.environ.get('GYP_GENERATORS', [])
    if generate_formats:
      generate_formats = re.split(r'[\s,]', generate_formats)
    if generate_formats:
      options.formats = generate_formats
    else:
      # Nothing in the variable, default based on platform.
      if sys.platform == 'darwin':
        options.formats = ['xcode']
      elif sys.platform in ('win32', 'cygwin'):
        options.formats = ['msvs']
      else:
        options.formats = ['make']
  if not options.generator_output and options.use_environment:
    g_o = os.environ.get('GYP_GENERATOR_OUTPUT')
    if g_o:
      options.generator_output = g_o
  options.parallel = not options.no_parallel
  for mode in options.debug:
    gyp.debug[mode] = 1
  # Do an extra check to avoid work when we're not debugging.
  if DEBUG_GENERAL in gyp.debug:
    DebugOutput(DEBUG_GENERAL, 'running with these options:')
    for option, value in sorted(options.__dict__.items()):
      if option[0] == '_':
        continue
      if isinstance(value, basestring):
        DebugOutput(DEBUG_GENERAL, "  %s: '%s'", option, value)
      else:
        DebugOutput(DEBUG_GENERAL, "  %s: %s", option, value)
  if not build_files:
    build_files = FindBuildFiles()
  if not build_files:
    raise GypError((usage + '\n\n%s: error: no build_file') %
                   (my_name, my_name))
  # TODO(mark): Chromium-specific hack!
  # For Chromium, the gyp "depth" variable should always be a relative path
  # to Chromium's top-level "src" directory. If no depth variable was set
  # on the command line, try to find a "src" directory by looking at the
  # absolute path to each build file's directory. The first "src" component
  # found will be treated as though it were the path used for --depth.
  if not options.depth:
    for build_file in build_files:
      build_file_dir = os.path.abspath(os.path.dirname(build_file))
      build_file_dir_components = build_file_dir.split(os.path.sep)
      components_len = len(build_file_dir_components)
      for index in xrange(components_len - 1, -1, -1):
        if build_file_dir_components[index] == 'src':
          options.depth = os.path.sep.join(build_file_dir_components)
          break
        del build_file_dir_components[index]
      # If the inner loop found something, break without advancing to another
      # build file.
      if options.depth:
        break
    if not options.depth:
      raise GypError('Could not automatically locate src directory. This is'
                     'a temporary Chromium feature that will be removed. Use'
                     '--depth as a workaround.')
  # If toplevel-dir is not set, we assume that depth is the root of our source
  # tree.
  if not options.toplevel_dir:
    options.toplevel_dir = options.depth
  # -D on the command line sets variable defaults - D isn't just for define,
  # it's for default. Perhaps there should be a way to force (-F?) a
  # variable's value so that it can't be overridden by anything else.
  cmdline_default_variables = {}
  defines = []
  if options.use_environment:
    defines += ShlexEnv('GYP_DEFINES')
  if options.defines:
    defines += options.defines
  cmdline_default_variables = NameValueListToDict(defines)
  if DEBUG_GENERAL in gyp.debug:
    DebugOutput(DEBUG_GENERAL,
                "cmdline_default_variables: %s", cmdline_default_variables)
  # Set up includes.
  includes = []
  # If ~/.gyp/include.gypi exists, it'll be forcibly included into every
  # .gyp file that's loaded, before anything else is included.
  if home_dot_gyp != None:
    default_include = os.path.join(home_dot_gyp, 'include.gypi')
    if os.path.exists(default_include):
      print 'Using overrides found in ' + default_include
      includes.append(default_include)
  # Command-line --include files come after the default include.
  if options.includes:
    includes.extend(options.includes)
  # Generator flags should be prefixed with the target generator since they
  # are global across all generator runs.
  gen_flags = []
  if options.use_environment:
    gen_flags += ShlexEnv('GYP_GENERATOR_FLAGS')
  if options.generator_flags:
    gen_flags += options.generator_flags
  generator_flags = NameValueListToDict(gen_flags)
  if DEBUG_GENERAL in gyp.debug.keys():
    DebugOutput(DEBUG_GENERAL, "generator_flags: %s", generator_flags)
  # Generate all requested formats (use a set in case we got one format request
  # twice)
  for format in set(options.formats):
    params = {'options': options,
              'build_files': build_files,
              'generator_flags': generator_flags,
              'cwd': os.getcwd(),
              'build_files_arg': build_files_arg,
              'gyp_binary': sys.argv[0],
              'home_dot_gyp': home_dot_gyp,
              'parallel': options.parallel,
              'root_targets': options.root_targets,
              'target_arch': cmdline_default_variables.get('target_arch', '')}
    # Start with the default variables from the command line.
    [generator, flat_list, targets, data] = Load(
        build_files, format, cmdline_default_variables, includes, options.depth,
        params, options.check, options.circular_check)
    # TODO(mark): Pass |data| for now because the generator needs a list of
    # build files that came in. In the future, maybe it should just accept
    # a list, and not the whole data dict.
    # NOTE: flat_list is the flattened dependency graph specifying the order
    # that targets may be built. Build systems that operate serially or that
    # need to have dependencies defined before dependents reference them should
    # generate targets in the order specified in flat_list.
    generator.GenerateOutput(flat_list, targets, data, params)
    if options.configs:
      valid_configs = targets[flat_list[0]]['configurations'].keys()
      for conf in options.configs:
        if conf not in valid_configs:
          raise GypError('Invalid config specified via --build: %s' % conf)
      generator.PerformBuild(data, options.configs, params)
  # Done
  return 0
def main(args):
  """Entry point: run gyp_main, converting GypError into exit code 1."""
  try:
    return gyp_main(args)
  except GypError, e:
    # Expected configuration errors are reported on stderr without a
    # traceback.
    sys.stderr.write("gyp: %s\n" % e)
    return 1
# NOTE: setuptools generated console_scripts calls function with no arguments
def script_main():
  """Console-script entry point: run main() on the process arguments."""
  cli_args = sys.argv[1:]
  return main(cli_args)
if __name__ == '__main__':
  # Support direct invocation as well as the setuptools console script.
  sys.exit(script_main())
| apache-2.0 |
klim-iv/phantomjs-qt5 | src/webkit/Tools/Scripts/webkitpy/tool/steps/suggestreviewers.py | 125 | 2506 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
class SuggestReviewers(AbstractStep):
    """Step that suggests (and optionally CCs on the bug) reviewers who have
    recently modified the files touched by the patch."""

    @classmethod
    def options(cls):
        # Extend the base step options with the flags this step consumes.
        return AbstractStep.options() + [
            Options.git_commit,
            Options.suggest_reviewers,
        ]

    def run(self, state):
        if not self._options.suggest_reviewers:
            return
        # Cap the suggestion list at the five most relevant reviewers.
        reviewers = self._tool.checkout().suggested_reviewers(self._options.git_commit, self._changed_files(state))[:5]
        print "The following reviewers have recently modified files in your patch:"
        print ", ".join([reviewer.full_name for reviewer in reviewers])
        # CCing requires a bug id in the state and the user's confirmation.
        if not state.get('bug_id'):
            return
        if not self._tool.user.confirm("Would you like to CC them?"):
            return
        reviewer_emails = [reviewer.bugzilla_email() for reviewer in reviewers]
        self._tool.bugs.add_cc_to_bug(state['bug_id'], reviewer_emails)
| bsd-3-clause |
tmxdyf/io2015-codelabs | search-samples/recipe-app-website/main.py | 16 | 3762 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jinja2
import json
import os
import webapp2
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
def load_recipe(recipe_id):
    """Load a recipe JSON file by id from the recipes/ directory.

    Returns the parsed JSON dict with an extra 'id' key so templates can
    link back to the recipe page.
    """
    filename = os.path.dirname(__file__) + '/recipes/' + recipe_id + '.json'
    # Use a context manager so the file handle is closed promptly; the
    # original open(...).read() leaked the handle until GC.
    with open(filename, 'r') as f:
        recipe = json.loads(f.read())
    recipe['id'] = recipe_id
    return recipe
class MainPage(webapp2.RequestHandler):
    """Handler for '/': renders the static landing page."""

    def get(self):
        # The landing page has no dynamic data beyond its title.
        context = {'title': 'RecipeApp'}
        page = JINJA_ENVIRONMENT.get_template('index.html')
        self.response.write(page.render(context))
class RecipePage(webapp2.RequestHandler):
    """Handler for '/recipe/<id>': renders a single recipe page."""

    def get(self, recipe_id):
        # 'q' is carried over from the search page so the template can show
        # search context; landing here via search implies exactly one result.
        query = self.request.get('q')
        num_results = 0
        if query:
            num_results = 1
        recipe = load_recipe(recipe_id)
        # Group ingredients by their optional 'category' key, preserving the
        # order in which categories first appear. The '' section collects
        # uncategorized ingredients and is always listed first.
        ingredient_sections = ['']
        ingredients_by_section = {'':[]}
        for ingredient in recipe['ingredients']:
            if 'category' in ingredient:
                category = ingredient['category']
                ingredient_section = []
                if not category in ingredients_by_section:
                    ingredients_by_section[category] = ingredient_section
                    ingredient_sections.append(category)
                else:
                    ingredient_section = ingredients_by_section[category]
                ingredient_section.append(ingredient)
            else:
                ingredients_by_section[''].append(ingredient)
        template_values = {
            'title': recipe['title'],
            'recipe': recipe,
            'ingredients': ingredients_by_section,
            'ingredient_sections': ingredient_sections,
            'query': query,
            'num_results': num_results
        }
        template = JINJA_ENVIRONMENT.get_template('recipe.html')
        self.response.write(template.render(template_values))
class SearchResultsPage(webapp2.RequestHandler):
    """Handler for '/search': substring search over the known recipe titles."""

    def get(self):
        query = self.request.get('q')
        results = []
        # Normalize the query; a trailing "recipes" word is stripped so
        # e.g. "salad recipes" matches recipes titled "... Salad".
        clean_query = query.lower().strip()
        if clean_query.endswith('recipes'):
            clean_query = clean_query[:-7].strip()
        # The recipe catalog is a fixed set of ids; each file is loaded and
        # matched by case-insensitive substring on its title.
        for recipe_id in ['grilled-potato-salad', 'haloumi-salad', 'pierogi-poutine', 'wedge-salad', 'malaga-paella']:
            recipe = load_recipe(recipe_id)
            if recipe['title'].lower().find(clean_query) >= 0:
                results.append(recipe)
        # A unique hit goes straight to the recipe page (keeping the query
        # in the URL); otherwise render the result list.
        if len(results) == 1:
            self.redirect('/recipe/' + results[0]['id'] + '?q=' + query)
        else:
            template_values = {
                'title': '"' + query + '" - RecipeApp',
                'query': query,
                'results': results,
                'num_results': len(results)
            }
            template = JINJA_ENVIRONMENT.get_template('search.html')
            self.response.write(template.render(template_values))
# WSGI routing table: exact paths for the landing and search pages, plus a
# regex capture group for individual recipe ids.
application = webapp2.WSGIApplication([
    ('/', MainPage),
    (r'/recipe/(.+)', RecipePage),
    (r'/search', SearchResultsPage)
], debug=True)
home-assistant/home-assistant | tests/util/test_async.py | 5 | 7008 | """Tests for async util methods from Python source."""
import asyncio
import time
from unittest.mock import MagicMock, Mock, patch
import pytest
from homeassistant.util import async_ as hasync
@patch("asyncio.coroutines.iscoroutine")
@patch("concurrent.futures.Future")
@patch("threading.get_ident")
def test_fire_coroutine_threadsafe_from_inside_event_loop(
    mock_ident, _, mock_iscoroutine
):
    """Testing calling fire_coroutine_threadsafe from inside an event loop."""
    coro = MagicMock()
    loop = MagicMock()
    # Loop thread ident unknown: scheduling is allowed.
    loop._thread_ident = None
    mock_ident.return_value = 5
    mock_iscoroutine.return_value = True
    hasync.fire_coroutine_threadsafe(coro, loop)
    assert len(loop.call_soon_threadsafe.mock_calls) == 1
    # Called from the loop's own thread: refused (would deadlock), and the
    # call count must not grow.
    loop._thread_ident = 5
    mock_ident.return_value = 5
    mock_iscoroutine.return_value = True
    with pytest.raises(RuntimeError):
        hasync.fire_coroutine_threadsafe(coro, loop)
    assert len(loop.call_soon_threadsafe.mock_calls) == 1
    # Not actually a coroutine: TypeError, nothing scheduled.
    loop._thread_ident = 1
    mock_ident.return_value = 5
    mock_iscoroutine.return_value = False
    with pytest.raises(TypeError):
        hasync.fire_coroutine_threadsafe(coro, loop)
    assert len(loop.call_soon_threadsafe.mock_calls) == 1
    # Different thread and a real coroutine: scheduled again.
    loop._thread_ident = 1
    mock_ident.return_value = 5
    mock_iscoroutine.return_value = True
    hasync.fire_coroutine_threadsafe(coro, loop)
    assert len(loop.call_soon_threadsafe.mock_calls) == 2
@patch("concurrent.futures.Future")
@patch("threading.get_ident")
def test_run_callback_threadsafe_from_inside_event_loop(mock_ident, _):
    """Testing calling run_callback_threadsafe from inside an event loop."""
    callback = MagicMock()
    # spec limits the mock surface so unexpected attribute use would fail.
    loop = Mock(spec=["call_soon_threadsafe"])
    # Loop thread ident unknown: scheduling is allowed.
    loop._thread_ident = None
    mock_ident.return_value = 5
    hasync.run_callback_threadsafe(loop, callback)
    assert len(loop.call_soon_threadsafe.mock_calls) == 1
    # Called from the loop's own thread: refused with RuntimeError.
    loop._thread_ident = 5
    mock_ident.return_value = 5
    with pytest.raises(RuntimeError):
        hasync.run_callback_threadsafe(loop, callback)
    assert len(loop.call_soon_threadsafe.mock_calls) == 1
    # Different thread: allowed again.
    loop._thread_ident = 1
    mock_ident.return_value = 5
    hasync.run_callback_threadsafe(loop, callback)
    assert len(loop.call_soon_threadsafe.mock_calls) == 2
async def test_check_loop_async():
    """Test check_loop detects when called from event loop without integration context."""
    # With no integration frame on the stack, in-loop I/O is a hard error.
    with pytest.raises(RuntimeError):
        hasync.check_loop()
async def test_check_loop_async_integration(caplog):
    """Test check_loop detects when called from event loop from integration context."""
    # Fake stack simulates core -> built-in integration (hue) -> third-party
    # library, so the log message should blame the integration frame.
    with pytest.raises(RuntimeError), patch(
        "homeassistant.util.async_.extract_stack",
        return_value=[
            Mock(
                filename="/home/paulus/homeassistant/core.py",
                lineno="23",
                line="do_something()",
            ),
            Mock(
                filename="/home/paulus/homeassistant/components/hue/light.py",
                lineno="23",
                line="self.light.is_on",
            ),
            Mock(
                filename="/home/paulus/aiohue/lights.py",
                lineno="2",
                line="something()",
            ),
        ],
    ):
        hasync.check_loop()
    assert (
        "Detected I/O inside the event loop. This is causing stability issues. Please report issue for hue doing I/O at homeassistant/components/hue/light.py, line 23: self.light.is_on"
        in caplog.text
    )
async def test_check_loop_async_custom(caplog):
    """Test check_loop detects when called from event loop with custom component context."""
    # Same shape as the integration test, but the offending frame lives under
    # config/custom_components, so the message must point at the custom
    # component author instead.
    with pytest.raises(RuntimeError), patch(
        "homeassistant.util.async_.extract_stack",
        return_value=[
            Mock(
                filename="/home/paulus/homeassistant/core.py",
                lineno="23",
                line="do_something()",
            ),
            Mock(
                filename="/home/paulus/config/custom_components/hue/light.py",
                lineno="23",
                line="self.light.is_on",
            ),
            Mock(
                filename="/home/paulus/aiohue/lights.py",
                lineno="2",
                line="something()",
            ),
        ],
    ):
        hasync.check_loop()
    assert (
        "Detected I/O inside the event loop. This is causing stability issues. Please report issue to the custom component author for hue doing I/O at custom_components/hue/light.py, line 23: self.light.is_on"
        in caplog.text
    )
def test_check_loop_sync(caplog):
    """Test check_loop does nothing when called from thread."""
    # Outside the event loop thread the check must be a silent no-op.
    hasync.check_loop()
    assert "Detected I/O inside the event loop" not in caplog.text
def test_protect_loop_sync():
    """Test protect_loop calls check_loop."""
    calls = []
    with patch("homeassistant.util.async_.check_loop") as mock_loop:
        hasync.protect_loop(calls.append)(1)
    # The wrapper must both invoke check_loop and forward the call/arguments.
    assert len(mock_loop.mock_calls) == 1
    assert calls == [1]
async def test_gather_with_concurrency():
    """Test gather_with_concurrency limits the number of running tasks."""
    runs = 0
    now_time = time.time()

    async def _increment_runs_if_in_time():
        # Tasks held back past the 0.1 s window return -1, proving they did
        # not start concurrently with the first batch.
        if time.time() - now_time > 0.1:
            return -1
        nonlocal runs
        runs += 1
        await asyncio.sleep(0.1)
        return runs

    results = await hasync.gather_with_concurrency(
        2, *[_increment_runs_if_in_time() for i in range(4)]
    )
    # With a limit of 2, only the first two tasks ran inside the window.
    assert results == [2, 2, -1, -1]
async def test_shutdown_run_callback_threadsafe(hass):
    """Test we can shutdown run_callback_threadsafe."""
    hasync.shutdown_run_callback_threadsafe(hass.loop)
    callback = MagicMock()
    # After shutdown, further scheduling attempts must raise.
    with pytest.raises(RuntimeError):
        hasync.run_callback_threadsafe(hass.loop, callback)
async def test_run_callback_threadsafe(hass):
    """Test run_callback_threadsafe runs code in the event loop."""
    it_ran = False

    def callback():
        nonlocal it_ran
        it_ran = True

    # Scheduling returns a truthy future-like object, but the callback has
    # not executed yet at this point.
    assert hasync.run_callback_threadsafe(hass.loop, callback)
    assert it_ran is False

    # Verify that async_block_till_done will flush
    # out the callback
    await hass.async_block_till_done()
    assert it_ran is True
async def test_callback_is_always_scheduled(hass):
    """Test run_callback_threadsafe always calls call_soon_threadsafe before checking for shutdown."""
    # We have to check the shutdown state AFTER the callback is scheduled otherwise
    # the function could continue on and the caller call `future.result()` after
    # the point in the main thread where callbacks are no longer run.
    callback = MagicMock()
    hasync.shutdown_run_callback_threadsafe(hass.loop)

    with patch.object(
        hass.loop, "call_soon_threadsafe"
    ) as mock_call_soon_threadsafe, pytest.raises(RuntimeError):
        hasync.run_callback_threadsafe(hass.loop, callback)

    # Even though shutdown made the call raise, the callback must still have
    # been scheduled exactly once.
    mock_call_soon_threadsafe.assert_called_once()
| apache-2.0 |
jmohr/conrad | conrad/adapter/rest.py | 1 | 1873 | import json
import logging
import restkit
from base import Base
logger = logging.getLogger(__name__)
class Rest(Base):
    """REST adapter that maps CRUD operations onto HTTP verbs via restkit.

    Responses are assumed to be JSON; find() and create() wrap the parsed
    body in a single-element list so callers can treat it as a row set.
    """

    def connect(self, uri):
        # One restkit Resource for the service root; all requests go through it.
        self.uri = uri
        self.resource = restkit.Resource(self.uri)

    def list(self, resource):
        # Listing is simply an unconditional find.
        return self.find(resource)

    def find(self, resource, conditions=None):
        """GET *resource*; an 'id' condition addresses a single record.

        *conditions* defaults to None instead of a mutable dict literal so
        no dict is shared between calls; a fresh one is made per call.
        """
        if conditions is None:
            conditions = {}
        logger.debug('Finding resource {} with conditions {}'.format(
            resource, conditions))
        if 'id' in conditions:
            res = self.resource.get('{}/{}'.format(resource, conditions['id']))
        else:
            res = self.resource.get(resource, params_dict=conditions)
        logger.debug('Find will convert {} to a result dict'.format(res))
        return [self.result_dict(res.body_string())]

    def delete(self, resource, conditions=None):
        """DELETE *resource*; an 'id' condition addresses a single record."""
        if conditions is None:
            conditions = {}
        if 'id' in conditions:
            res = self.resource.delete('{}/{}'.format(resource, conditions['id']))
        else:
            res = self.resource.delete(resource, params_dict=conditions)
        return self.result_dict(res.body_string())

    def update(self, resource, attributes=None, conditions=None):
        """PUT *attributes* onto *resource*; an 'id' condition selects the record."""
        if attributes is None:
            attributes = {}
        if conditions is None:
            conditions = {}
        if 'id' in conditions:
            res = self.resource.put('{}/{}'.format(resource, conditions['id']), payload=attributes)
        else:
            res = self.resource.put(resource, params_dict=conditions, payload=attributes)
        return self.result_dict(res.body_string())

    def create(self, resource, attributes):
        """POST *attributes* to create a new record under *resource*."""
        logger.debug('Creating {} with attributes {}'.format(resource, attributes))
        res = self.resource.post(resource, payload=attributes)
        d = [self.result_dict(res.body_string())]
        logger.debug('Rest.create() is returning: {}'.format(d))
        return d

    def result_dict(self, result):
        # Decode a JSON response body into native Python data.
        logger.debug('Converting {} to dict'.format(result))
        return json.loads(result)
| bsd-3-clause |
Distrotech/qtwebkit | Tools/Scripts/webkitpy/common/webkit_finder.py | 125 | 3344 | # Copyright (c) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class WebKitFinder(object):
    """Resolves well-known paths (scripts, LayoutTests, ...) relative to the
    top of a WebKit checkout, using an injected filesystem object."""

    def __init__(self, filesystem):
        self._filesystem = filesystem
        # Lazily computed and cached by webkit_base().
        self._webkit_base = None

    def webkit_base(self):
        """Returns the absolute path to the top of the WebKit tree.

        Raises an AssertionError if the top dir can't be determined."""
        # Note: This code somewhat duplicates the code in
        # scm.find_checkout_root(). However, that code only works if the top
        # of the SCM repository also matches the top of the WebKit tree. Some SVN users
        # (the chromium test bots, for example), might only check out subdirectories like
        # Tools/Scripts. This code will also work if there is no SCM system at all.
        if not self._webkit_base:
            # (The original contained a no-op "self._webkit_base =
            # self._webkit_base" here; it has been removed.)
            module_path = self._filesystem.path_to_module(self.__module__)
            tools_index = module_path.rfind('Tools')
            assert tools_index != -1, "could not find location of this checkout from %s" % module_path
            self._webkit_base = self._filesystem.normpath(module_path[0:tools_index - 1])
        return self._webkit_base

    def path_from_webkit_base(self, *comps):
        """Joins *comps* onto the checkout root, yielding an absolute path."""
        return self._filesystem.join(self.webkit_base(), *comps)

    def path_to_script(self, script_name):
        """Returns the relative path to the script from the top of the WebKit tree."""
        # This is intentionally relative in order to force callers to consider what
        # their current working directory is (and change to the top of the tree if necessary).
        return self._filesystem.join("Tools", "Scripts", script_name)

    def layout_tests_dir(self):
        return self.path_from_webkit_base('LayoutTests')

    def perf_tests_dir(self):
        return self.path_from_webkit_base('PerformanceTests')
| lgpl-3.0 |
AccelAI/accel.ai | flask-aws/lib/python2.7/site-packages/pip/locations.py | 340 | 5626 | """Locations where we look for configs, install stuff, etc"""
from __future__ import absolute_import
import os
import os.path
import site
import sys
from distutils import sysconfig
from distutils.command.install import install, SCHEME_KEYS # noqa
from pip.compat import WINDOWS, expanduser
from pip.utils import appdirs
# Application Directories
USER_CACHE_DIR = appdirs.user_cache_dir("pip")
DELETE_MARKER_MESSAGE = '''\
This file is placed here by pip to indicate the source was put
here by pip.
Once this package is successfully installed this source code will be
deleted (unless you remove this file).
'''
PIP_DELETE_MARKER_FILENAME = 'pip-delete-this-directory.txt'
def write_delete_marker_file(directory):
    """
    Write the pip delete marker file into this directory.
    """
    marker_path = os.path.join(directory, PIP_DELETE_MARKER_FILENAME)
    with open(marker_path, 'w') as fp:
        fp.write(DELETE_MARKER_MESSAGE)
def running_under_virtualenv():
    """
    Return True if we're running inside a virtualenv, False otherwise.
    """
    # Classic virtualenv sets sys.real_prefix; PEP 405 venvs are detected by
    # base_prefix differing from prefix.
    in_virtualenv = hasattr(sys, 'real_prefix')
    in_venv = sys.prefix != getattr(sys, "base_prefix", sys.prefix)
    return in_virtualenv or in_venv
def virtualenv_no_global():
    """
    Return True if in a venv and no system site packages.
    """
    # Mirror virtualenv.py's convention: a marker file named
    # no-global-site-packages.txt is dropped next to the patched site.py
    # when the env is created without --system-site-packages.
    site_dir = os.path.dirname(os.path.abspath(site.__file__))
    marker_file = os.path.join(site_dir, 'no-global-site-packages.txt')
    if running_under_virtualenv() and os.path.isfile(marker_file):
        return True
# Where editable ("pip install -e") checkouts live: inside the virtualenv
# when we're in one, otherwise under the current working directory.
if running_under_virtualenv():
    src_prefix = os.path.join(sys.prefix, 'src')
else:
    # FIXME: keep src in cwd for now (it is not a temporary folder)
    try:
        src_prefix = os.path.join(os.getcwd(), 'src')
    except OSError:
        # In case the current working directory has been renamed or deleted
        sys.exit(
            "The folder you are executing pip from can no longer be found."
        )

# under macOS + virtualenv sys.prefix is not properly resolved
# it is something like /path/to/python/bin/..
# Note: using realpath due to tmp dirs on OSX being symlinks
src_prefix = os.path.abspath(src_prefix)

# FIXME doesn't account for venv linked to global site-packages
site_packages = sysconfig.get_python_lib()
user_site = site.USER_SITE
user_dir = expanduser('~')
# Per-platform locations of scripts and of the user/legacy config files.
if WINDOWS:
    bin_py = os.path.join(sys.prefix, 'Scripts')
    bin_user = os.path.join(user_site, 'Scripts')
    # buildout uses 'bin' on Windows too?
    if not os.path.exists(bin_py):
        bin_py = os.path.join(sys.prefix, 'bin')
        bin_user = os.path.join(user_site, 'bin')
    config_basename = 'pip.ini'
    legacy_storage_dir = os.path.join(user_dir, 'pip')
    legacy_config_file = os.path.join(
        legacy_storage_dir,
        config_basename,
    )
else:
    bin_py = os.path.join(sys.prefix, 'bin')
    bin_user = os.path.join(user_site, 'bin')
    config_basename = 'pip.conf'
    legacy_storage_dir = os.path.join(user_dir, '.pip')
    legacy_config_file = os.path.join(
        legacy_storage_dir,
        config_basename,
    )

# Forcing to use /usr/local/bin for standard macOS framework installs
# Also log to ~/Library/Logs/ for use with the Console.app log viewer
if sys.platform[:6] == 'darwin' and sys.prefix[:16] == '/System/Library/':
    bin_py = '/usr/local/bin'

# Candidate locations of site-wide pip config files, one per platform
# config directory.
site_config_files = [
    os.path.join(path, config_basename)
    for path in appdirs.site_config_dirs('pip')
]
def distutils_scheme(dist_name, user=False, home=None, root=None,
                     isolated=False, prefix=None):
    """
    Return a distutils install scheme

    Builds a distutils ``install`` command object configured from the
    caller's options plus the user's config files, finalizes it, and reads
    the resulting ``install_*`` paths into a dict keyed by SCHEME_KEYS.
    ``user`` and ``prefix`` are mutually exclusive.
    """
    from distutils.dist import Distribution

    scheme = {}

    if isolated:
        # --no-user-cfg keeps the user's distutils config from influencing
        # the computed paths.
        extra_dist_args = {"script_args": ["--no-user-cfg"]}
    else:
        extra_dist_args = {}
    dist_args = {'name': dist_name}
    dist_args.update(extra_dist_args)

    d = Distribution(dist_args)
    d.parse_config_files()
    i = d.get_command_obj('install', create=True)
    # NOTE: setting user or home has the side-effect of creating the home dir
    # or user base for installations during finalize_options()
    # ideally, we'd prefer a scheme class that has no side-effects.
    assert not (user and prefix), "user={0} prefix={1}".format(user, prefix)
    i.user = user or i.user
    if user:
        i.prefix = ""
    i.prefix = prefix or i.prefix
    i.home = home or i.home
    i.root = root or i.root
    i.finalize_options()
    for key in SCHEME_KEYS:
        scheme[key] = getattr(i, 'install_' + key)

    # install_lib specified in setup.cfg should install *everything*
    # into there (i.e. it takes precedence over both purelib and
    # platlib).  Note, i.install_lib is *always* set after
    # finalize_options(); we only want to override here if the user
    # has explicitly requested it hence going back to the config
    if 'install_lib' in d.get_option_dict('install'):
        scheme.update(dict(purelib=i.install_lib, platlib=i.install_lib))

    if running_under_virtualenv():
        # Virtualenvs have no per-package headers dir; synthesize one under
        # <env>/include/site/pythonX.Y/<dist_name>.
        scheme['headers'] = os.path.join(
            sys.prefix,
            'include',
            'site',
            'python' + sys.version[:3],
            dist_name,
        )

        if root is not None:
            # Re-base the headers path under root, dropping any drive letter
            # so the join works on Windows.
            path_no_drive = os.path.splitdrive(
                os.path.abspath(scheme["headers"]))[1]
            scheme["headers"] = os.path.join(
                root,
                path_no_drive[1:],
            )

    return scheme
| mit |
manipopopo/tensorflow | tensorflow/contrib/labeled_tensor/python/ops/test_util.py | 139 | 1741 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for writing tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
class Base(test.TestCase):
  """A class with some useful methods for testing."""

  def eval(self, tensors):
    # Evaluate tensors inside a session with queue runners started, so
    # queue-backed input pipelines actually produce data during the run.
    with self.test_session() as sess:
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
      try:
        results = sess.run(tensors)
      finally:
        # Always stop and join the queue threads, even if sess.run raised.
        coord.request_stop()
        coord.join(threads)
      return results

  def assertTensorsEqual(self, tensor_0, tensor_1):
    # Evaluate both tensors in one run and compare element-wise.
    [tensor_0_eval, tensor_1_eval] = self.eval([tensor_0, tensor_1])
    self.assertAllEqual(tensor_0_eval, tensor_1_eval)

  def assertLabeledTensorsEqual(self, tensor_0, tensor_1):
    # Labeled tensors are equal when both their axes and their values match.
    self.assertEqual(tensor_0.axes, tensor_1.axes)
    self.assertTensorsEqual(tensor_0.tensor, tensor_1.tensor)
| apache-2.0 |
saurabhjn76/sympy | sympy/utilities/autowrap.py | 37 | 31887 | """Module for compiling codegen output, and wrap the binary for use in
python.
.. note:: To use the autowrap module it must first be imported
>>> from sympy.utilities.autowrap import autowrap
This module provides a common interface for different external backends, such
as f2py, fwrap, Cython, SWIG(?) etc. (Currently only f2py and Cython are
implemented) The goal is to provide access to compiled binaries of acceptable
performance with a one-button user interface, i.e.
>>> from sympy.abc import x,y
>>> expr = ((x - y)**(25)).expand()
>>> binary_callable = autowrap(expr)
>>> binary_callable(1, 2)
-1.0
The callable returned from autowrap() is a binary python function, not a
SymPy object. If it is desired to use the compiled function in symbolic
expressions, it is better to use binary_function() which returns a SymPy
Function object. The binary callable is attached as the _imp_ attribute and
invoked when a numerical evaluation is requested with evalf(), or with
lambdify().
>>> from sympy.utilities.autowrap import binary_function
>>> f = binary_function('f', expr)
>>> 2*f(x, y) + y
y + 2*f(x, y)
>>> (2*f(x, y) + y).evalf(2, subs={x: 1, y:2})
0.e-110
The idea is that a SymPy user will primarily be interested in working with
mathematical expressions, and should not have to learn details about wrapping
tools in order to evaluate expressions numerically, even if they are
computationally expensive.
When is this useful?
1) For computations on large arrays, Python iterations may be too slow,
and depending on the mathematical expression, it may be difficult to
exploit the advanced index operations provided by NumPy.
2) For *really* long expressions that will be called repeatedly, the
compiled binary should be significantly faster than SymPy's .evalf()
3) If you are generating code with the codegen utility in order to use
it in another project, the automatic python wrappers let you test the
binaries immediately from within SymPy.
4) To create customized ufuncs for use with numpy arrays.
See *ufuncify*.
When is this module NOT the best approach?
1) If you are really concerned about speed or memory optimizations,
you will probably get better results by working directly with the
wrapper tools and the low level code. However, the files generated
by this utility may provide a useful starting point and reference
code. Temporary files will be left intact if you supply the keyword
tempdir="path/to/files/".
2) If the array computation can be handled easily by numpy, and you
don't need the binaries for another project.
"""
from __future__ import print_function, division
_doctest_depends_on = {'exe': ('f2py', 'gfortran', 'gcc'), 'modules': ('numpy',)}
import sys
import os
import shutil
import tempfile
from subprocess import STDOUT, CalledProcessError
from string import Template
from sympy.core.cache import cacheit
from sympy.core.compatibility import check_output, range
from sympy.core.function import Lambda
from sympy.core.relational import Eq
from sympy.core.symbol import Dummy, Symbol
from sympy.tensor.indexed import Idx, IndexedBase
from sympy.utilities.codegen import (make_routine, get_code_generator,
OutputArgument, InOutArgument, InputArgument,
CodeGenArgumentListError, Result, ResultBase, CCodeGen)
from sympy.utilities.lambdify import implemented_function
from sympy.utilities.decorator import doctest_depends_on
class CodeWrapError(Exception):
    """Raised when generating, compiling or wrapping code fails."""
class CodeWrapper(object):
    """Base Class for code wrappers.

    Subclasses drive a code generator, build the generated sources in a
    temporary (or user-supplied) working directory, and import the
    resulting binary module back into Python.
    """
    _filename = "wrapped_code"
    _module_basename = "wrapper_module"
    # Global counter so each generated module gets a unique name; a module
    # cannot be cleanly re-imported under the same name in one process.
    _module_counter = 0

    @property
    def filename(self):
        return "%s_%s" % (self._filename, CodeWrapper._module_counter)

    @property
    def module_name(self):
        return "%s_%s" % (self._module_basename, CodeWrapper._module_counter)

    def __init__(self, generator, filepath=None, flags=None, verbose=False):
        """
        generator -- the code generator to use
        filepath  -- if given, keep generated files in this directory
        flags     -- extra command-line flags for the build tool
        verbose   -- echo the build tool's output
        """
        self.generator = generator
        self.filepath = filepath
        # Use None as the default rather than a mutable list literal, so the
        # flags list is never shared between wrapper instances.
        self.flags = flags if flags is not None else []
        self.quiet = not verbose

    @property
    def include_header(self):
        return bool(self.filepath)

    @property
    def include_empty(self):
        return bool(self.filepath)

    def _generate_code(self, main_routine, routines):
        # The main routine is emitted together with its helper routines.
        routines.append(main_routine)
        self.generator.write(
            routines, self.filename, True, self.include_header,
            self.include_empty)

    def wrap_code(self, routine, helpers=None):
        # BUG FIX: the original signature used helpers=[].  _generate_code
        # appends the main routine into this list, so the shared default
        # accumulated routines across successive wrap_code() calls made
        # without an explicit helpers argument.  A fresh list per call
        # avoids that; passing an explicit list behaves as before.
        helpers = helpers if helpers is not None else []
        workdir = self.filepath or tempfile.mkdtemp("_sympy_compile")
        if not os.access(workdir, os.F_OK):
            os.mkdir(workdir)
        oldwork = os.getcwd()
        os.chdir(workdir)
        try:
            sys.path.append(workdir)
            self._generate_code(routine, helpers)
            self._prepare_files(routine)
            self._process_files(routine)
            mod = __import__(self.module_name)
        finally:
            sys.path.remove(workdir)
            CodeWrapper._module_counter += 1
            os.chdir(oldwork)
            if not self.filepath:
                try:
                    shutil.rmtree(workdir)
                except OSError:
                    # Could be some issues on Windows
                    pass
        return self._get_wrapped_function(mod, routine.name)

    def _process_files(self, routine):
        # Run the build command (subclass-provided) plus any user flags,
        # surfacing the tool's output in the raised error on failure.
        command = self.command
        command.extend(self.flags)
        try:
            retoutput = check_output(command, stderr=STDOUT)
        except CalledProcessError as e:
            raise CodeWrapError(
                "Error while executing command: %s. Command output is:\n%s" % (
                    " ".join(command), e.output.decode()))
        if not self.quiet:
            print(retoutput)
class DummyWrapper(CodeWrapper):
    """Class used for testing independent of backends """

    # Template for the generated stand-in module: the "wrapped" function
    # just returns the printed expression, with args/returns recorded as
    # attributes for inspection by tests.
    template = """# dummy module for testing of SymPy
def %(name)s():
    return "%(expr)s"
%(name)s.args = "%(args)s"
%(name)s.returns = "%(retvals)s"
"""

    def _prepare_files(self, routine):
        # Nothing to prepare: the dummy backend compiles nothing.
        return

    def _generate_code(self, routine, helpers):
        # Write a pure-Python module instead of invoking a real backend.
        with open('%s.py' % self.module_name, 'w') as f:
            printed = ", ".join(
                [str(res.expr) for res in routine.result_variables])
            # convert OutputArguments to return value like f2py
            args = filter(lambda x: not isinstance(
                x, OutputArgument), routine.arguments)
            retvals = []
            for val in routine.result_variables:
                if isinstance(val, Result):
                    # Unnamed expression results have no result variable.
                    retvals.append('nameless')
                else:
                    retvals.append(val.result_var)

            print(DummyWrapper.template % {
                'name': routine.name,
                'expr': printed,
                'args': ", ".join([str(a.name) for a in args]),
                'retvals': ", ".join([str(val) for val in retvals])
            }, end="", file=f)

    def _process_files(self, routine):
        # No compilation step for the dummy backend.
        return

    @classmethod
    def _get_wrapped_function(cls, mod, name):
        return getattr(mod, name)
class CythonCodeWrapper(CodeWrapper):
    """Wrapper that uses Cython.

    Generates a ``.pyx`` file with Python wrappers for the C routines plus a
    ``setup.py``, then builds them in place with distutils/Cython.
    """
    # Template for the generated setup.py; {np_import}/{np_includes} are only
    # filled in when a routine needs numpy (see _need_numpy).
    setup_template = (
        "from distutils.core import setup\n"
        "from distutils.extension import Extension\n"
        "from Cython.Distutils import build_ext\n"
        "{np_import}"
        "\n"
        "setup(\n"
        "    cmdclass = {{'build_ext': build_ext}},\n"
        "    ext_modules = [Extension({ext_args},\n"
        "                             extra_compile_args=['-std=c99'])],\n"
        "{np_includes}"
        "    )")
    pyx_imports = (
        "import numpy as np\n"
        "cimport numpy as np\n\n")
    pyx_header = (
        "cdef extern from '{header_file}.h':\n"
        "    {prototype}\n\n")
    pyx_func = (
        "def {name}_c({arg_string}):\n"
        "\n"
        "{declarations}"
        "{body}")
    def __init__(self, *args, **kwargs):
        super(CythonCodeWrapper, self).__init__(*args, **kwargs)
        # Set to True by _prototype_arg when any argument is an array, which
        # requires numpy imports/include dirs in the generated files.
        self._need_numpy = False
    @property
    def command(self):
        """Command used to build the extension module in place."""
        command = [sys.executable, "setup.py", "build_ext", "--inplace"]
        return command
    def _prepare_files(self, routine):
        """Write the .pyx wrapper and setup.py next to the generated C code."""
        pyxfilename = self.module_name + '.pyx'
        codefilename = "%s.%s" % (self.filename, self.generator.code_extension)
        # pyx
        with open(pyxfilename, 'w') as f:
            self.dump_pyx([routine], f, self.filename)
        # setup.py
        ext_args = [repr(self.module_name), repr([pyxfilename, codefilename])]
        if self._need_numpy:
            np_import = 'import numpy as np\n'
            np_includes = '    include_dirs = [np.get_include()],\n'
        else:
            np_import = ''
            np_includes = ''
        with open('setup.py', 'w') as f:
            f.write(self.setup_template.format(ext_args=", ".join(ext_args),
                                               np_import=np_import,
                                               np_includes=np_includes))
    @classmethod
    def _get_wrapped_function(cls, mod, name):
        # Cython wrappers are emitted with a '_c' suffix (see pyx_func).
        return getattr(mod, name + '_c')
    def dump_pyx(self, routines, f, prefix):
        """Write a Cython file with python wrappers
        This file contains all the definitions of the routines in c code and
        refers to the header file.
        Arguments
        ---------
        routines
            List of Routine instances
        f
            File-like object to write the file to
        prefix
            The filename prefix, used to refer to the proper header file.
            Only the basename of the prefix is used.
        """
        headers = []
        functions = []
        for routine in routines:
            prototype = self.generator.get_prototype(routine)
            # C Function Header Import
            headers.append(self.pyx_header.format(header_file=prefix,
                                                  prototype=prototype))
            # Partition the C function arguments into categories
            py_rets, py_args, py_loc, py_inf = self._partition_args(routine.arguments)
            # Function prototype
            name = routine.name
            arg_string = ", ".join(self._prototype_arg(arg) for arg in py_args)
            # Local Declarations: inferred dimensions first, then output locals
            local_decs = []
            for arg, val in py_inf.items():
                proto = self._prototype_arg(arg)
                mat, ind = val
                local_decs.append("    cdef {0} = {1}.shape[{2}]".format(proto, mat, ind))
            local_decs.extend(["    cdef {0}".format(self._declare_arg(a)) for a in py_loc])
            declarations = "\n".join(local_decs)
            if declarations:
                declarations = declarations + "\n"
            # Function Body
            args_c = ", ".join([self._call_arg(a) for a in routine.arguments])
            rets = ", ".join([str(r.name) for r in py_rets])
            if routine.results:
                body = '    return %s(%s)' % (routine.name, args_c)
                if rets:
                    body = body + ', ' + rets
            else:
                body = '    %s(%s)\n' % (routine.name, args_c)
                body = body + '    return ' + rets
            functions.append(self.pyx_func.format(name=name, arg_string=arg_string,
                    declarations=declarations, body=body))
        # Write text to file
        if self._need_numpy:
            # Only import numpy if required
            f.write(self.pyx_imports)
        f.write('\n'.join(headers))
        f.write('\n'.join(functions))
    def _partition_args(self, args):
        """Group function arguments into categories.

        Returns (py_returns, py_args, py_locals, py_inferred) where
        py_inferred maps a dimension Symbol to the (array name, axis) it can
        be read from, so it need not appear in the Python signature.
        """
        py_args = []
        py_returns = []
        py_locals = []
        py_inferred = {}
        for arg in args:
            if isinstance(arg, OutputArgument):
                py_returns.append(arg)
                py_locals.append(arg)
            elif isinstance(arg, InOutArgument):
                py_returns.append(arg)
                py_args.append(arg)
            else:
                py_args.append(arg)
        # Find arguments that are array dimensions. These can be inferred
        # locally in the Cython code.
            if isinstance(arg, (InputArgument, InOutArgument)) and arg.dimensions:
                dims = [d[1] + 1 for d in arg.dimensions]
                sym_dims = [(i, d) for (i, d) in enumerate(dims) if isinstance(d, Symbol)]
                for (i, d) in sym_dims:
                    py_inferred[d] = (arg.name, i)
        for arg in args:
            if arg.name in py_inferred:
                py_inferred[arg] = py_inferred.pop(arg.name)
        # Filter inferred arguments from py_args
        py_args = [a for a in py_args if a not in py_inferred]
        return py_returns, py_args, py_locals, py_inferred
    def _prototype_arg(self, arg):
        """Cython-level declaration for *arg* (np.ndarray for arrays)."""
        mat_dec = "np.ndarray[{mtype}, ndim={ndim}] {name}"
        np_types = {'double': 'np.double_t',
                    'int': 'np.int_t'}
        t = arg.get_datatype('c')
        if arg.dimensions:
            self._need_numpy = True
            ndim = len(arg.dimensions)
            mtype = np_types[t]
            return mat_dec.format(mtype=mtype, ndim=ndim, name=arg.name)
        else:
            return "%s %s" % (t, str(arg.name))
    def _declare_arg(self, arg):
        """Declaration-with-initializer for a local (output) argument."""
        proto = self._prototype_arg(arg)
        if arg.dimensions:
            shape = '(' + ','.join(str(i[1] + 1) for i in arg.dimensions) + ')'
            return proto + " = np.empty({shape})".format(shape=shape)
        else:
            return proto + " = 0"
    def _call_arg(self, arg):
        """Expression passed to the underlying C function for *arg*."""
        if arg.dimensions:
            t = arg.get_datatype('c')
            return "<{0}*> {1}.data".format(t, arg.name)
        elif isinstance(arg, ResultBase):
            return "&{0}".format(arg.name)
        else:
            return str(arg.name)
class F2PyCodeWrapper(CodeWrapper):
    """Wrapper that uses f2py"""
    @property
    def command(self):
        """Build the f2py command line used to compile the generated source."""
        source_file = '.'.join([self.filename, self.generator.code_extension])
        f2py_args = ['-c', '-m', self.module_name, source_file]
        return [sys.executable, "-c",
                "import numpy.f2py as f2py2e;f2py2e.main()"] + f2py_args
    def _prepare_files(self, routine):
        """f2py works directly from the generated source; nothing to prepare."""
        pass
    @classmethod
    def _get_wrapped_function(cls, mod, name):
        """Fetch the compiled routine from the imported module by name."""
        return getattr(mod, name)
def _get_code_wrapper_class(backend):
    """Map a backend name (case-insensitive) to its CodeWrapper subclass."""
    return {
        'F2PY': F2PyCodeWrapper,
        'CYTHON': CythonCodeWrapper,
        'DUMMY': DummyWrapper,
    }[backend.upper()]
# Here we define a lookup of backends -> tuples of languages. For now, each
# tuple is of length 1, but if a backend supports more than one language,
# the most preferable language is listed first.
# Keys are upper-cased backend names; callers normalize with .upper() before
# looking up (see _infer_language / _validate_backend_language).
_lang_lookup = {'CYTHON': ('C',),
                'F2PY': ('F95',),
                'NUMPY': ('C',),
                'DUMMY': ('F95',)}  # Dummy here just for testing
def _infer_language(backend):
    """For a given backend, return the top choice of language"""
    supported = _lang_lookup.get(backend.upper(), False)
    if supported:
        # The first entry of each tuple is the preferred language.
        return supported[0]
    raise ValueError("Unrecognized backend: " + backend)
def _validate_backend_language(backend, language):
    """Throws error if backend and language are incompatible"""
    supported = _lang_lookup.get(backend.upper(), False)
    if not supported:
        raise ValueError("Unrecognized backend: " + backend)
    if language.upper() in supported:
        return
    raise ValueError(("Backend {0} and language {1} are "
                      "incompatible").format(backend, language))
@cacheit
@doctest_depends_on(exe=('f2py', 'gfortran'), modules=('numpy',))
def autowrap(
    expr, language=None, backend='f2py', tempdir=None, args=None, flags=None,
    verbose=False, helpers=None):
    """Generates python callable binaries based on the math expression.
    Parameters
    ----------
    expr
        The SymPy expression that should be wrapped as a binary routine.
    language : string, optional
        If supplied, (options: 'C' or 'F95'), specifies the language of the
        generated code. If ``None`` [default], the language is inferred based
        upon the specified backend.
    backend : string, optional
        Backend used to wrap the generated code. Either 'f2py' [default],
        or 'cython'.
    tempdir : string, optional
        Path to directory for temporary files. If this argument is supplied,
        the generated code and the wrapper input files are left intact in the
        specified path.
    args : iterable, optional
        An iterable of symbols. Specifies the argument sequence for the function.
    flags : iterable, optional
        Additional option flags that will be passed to the backend.
    verbose : bool, optional
        If True, autowrap will not mute the command line backends. This can be
        helpful for debugging.
    helpers : iterable, optional
        Used to define auxillary expressions needed for the main expr. If the
        main expression needs to call a specialized function it should be put
        in the ``helpers`` iterable. Autowrap will then make sure that the
        compiled main expression can link to the helper routine. Items should
        be tuples with (<funtion_name>, <sympy_expression>, <arguments>). It
        is mandatory to supply an argument sequence to helper routines.
    >>> from sympy.abc import x, y, z
    >>> from sympy.utilities.autowrap import autowrap
    >>> expr = ((x - y + z)**(13)).expand()
    >>> binary_func = autowrap(expr)
    >>> binary_func(1, 4, 2)
    -1.0
    """
    if language:
        _validate_backend_language(backend, language)
    else:
        language = _infer_language(backend)
    # Normalize optional iterables so they can be iterated below.
    helpers = helpers if helpers else ()
    flags = flags if flags else ()
    code_generator = get_code_generator(language, "autowrap")
    CodeWrapperClass = _get_code_wrapper_class(backend)
    code_wrapper = CodeWrapperClass(code_generator, tempdir, flags, verbose)
    try:
        routine = make_routine('autofunc', expr, args)
    except CodeGenArgumentListError as e:
        # if all missing arguments are for pure output, we simply attach them
        # at the end and try again, because the wrappers will silently convert
        # them to return values anyway.
        new_args = []
        for missing in e.missing_args:
            if not isinstance(missing, OutputArgument):
                raise
            new_args.append(missing.name)
        # NOTE(review): this assumes ``args`` supports ``+`` with a list
        # (i.e. is itself a list) when output arguments are missing; a tuple
        # or None here would raise TypeError -- confirm against callers.
        routine = make_routine('autofunc', expr, args + new_args)
    # Compile the helper routines alongside the main routine so it can link
    # against them.
    helps = []
    for name, expr, args in helpers:
        helps.append(make_routine(name, expr, args))
    return code_wrapper.wrap_code(routine, helpers=helps)
@doctest_depends_on(exe=('f2py', 'gfortran'), modules=('numpy',))
def binary_function(symfunc, expr, **kwargs):
    """Returns a sympy function with expr as binary implementation
    This is a convenience function that automates the steps needed to
    autowrap the SymPy expression and attaching it to a Function object
    with implemented_function().
    >>> from sympy.abc import x, y
    >>> from sympy.utilities.autowrap import binary_function
    >>> expr = ((x - y)**(25)).expand()
    >>> f = binary_function('f', expr)
    >>> type(f)
    <class 'sympy.core.function.UndefinedFunction'>
    >>> 2*f(x, y)
    2*f(x, y)
    >>> f(x, y).evalf(2, subs={x: 1, y: 2})
    -1.0
    """
    # Compile the expression to a callable, then attach it as the numerical
    # implementation of an (otherwise symbolic) undefined function.
    binary = autowrap(expr, **kwargs)
    return implemented_function(symfunc, binary)
#################################################################
# UFUNCIFY #
#################################################################
_ufunc_top = Template("""\
#include "Python.h"
#include "math.h"
#include "numpy/ndarraytypes.h"
#include "numpy/ufuncobject.h"
#include "numpy/halffloat.h"
#include ${include_file}
static PyMethodDef ${module}Methods[] = {
{NULL, NULL, 0, NULL}
};""")
_ufunc_body = Template("""\
static void ${funcname}_ufunc(char **args, npy_intp *dimensions, npy_intp* steps, void* data)
{
npy_intp i;
npy_intp n = dimensions[0];
${declare_args}
${declare_steps}
for (i = 0; i < n; i++) {
*((double *)out1) = ${funcname}(${call_args});
${step_increments}
}
}
PyUFuncGenericFunction ${funcname}_funcs[1] = {&${funcname}_ufunc};
static char ${funcname}_types[${n_types}] = ${types}
static void *${funcname}_data[1] = {NULL};""")
_ufunc_bottom = Template("""\
#if PY_VERSION_HEX >= 0x03000000
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"${module}",
NULL,
-1,
${module}Methods,
NULL,
NULL,
NULL,
NULL
};
PyMODINIT_FUNC PyInit_${module}(void)
{
PyObject *m, *d;
${function_creation}
m = PyModule_Create(&moduledef);
if (!m) {
return NULL;
}
import_array();
import_umath();
d = PyModule_GetDict(m);
${ufunc_init}
return m;
}
#else
PyMODINIT_FUNC init${module}(void)
{
PyObject *m, *d;
${function_creation}
m = Py_InitModule("${module}", ${module}Methods);
if (m == NULL) {
return;
}
import_array();
import_umath();
d = PyModule_GetDict(m);
${ufunc_init}
}
#endif\
""")
_ufunc_init_form = Template("""\
ufunc${ind} = PyUFunc_FromFuncAndData(${funcname}_funcs, ${funcname}_data, ${funcname}_types, 1, ${n_in}, ${n_out},
PyUFunc_None, "${module}", ${docstring}, 0);
PyDict_SetItemString(d, "${funcname}", ufunc${ind});
Py_DECREF(ufunc${ind});""")
# setup.py template for the ufunc module. ``dump_setup`` substitutes both
# ``module`` (the wrapper module name) and ``filename`` (the generated C
# source prefix); the sources list must reference ``${filename}.c`` -- the
# previous '$(unknown).c' placeholder was corrupted and produced a setup.py
# pointing at a nonexistent source file.
_ufunc_setup = Template("""\
def configuration(parent_package='', top_path=None):
    import numpy
    from numpy.distutils.misc_util import Configuration
    config = Configuration('',
                           parent_package,
                           top_path)
    config.add_extension('${module}', sources=['${module}.c', '${filename}.c'])
    return config
if __name__ == "__main__":
    from numpy.distutils.core import setup
    setup(configuration=configuration)""")
class UfuncifyCodeWrapper(CodeWrapper):
    """Wrapper for Ufuncify.

    Emits a C extension module that registers the generated routine as a
    numpy ufunc (see the _ufunc_* templates above), plus a setup.py to
    build it in place.
    """
    @property
    def command(self):
        """Command used to build the extension module in place."""
        command = [sys.executable, "setup.py", "build_ext", "--inplace"]
        return command
    def _prepare_files(self, routine):
        # C
        codefilename = self.module_name + '.c'
        with open(codefilename, 'w') as f:
            self.dump_c([routine], f, self.filename)
        # setup.py
        with open('setup.py', 'w') as f:
            self.dump_setup(f)
    @classmethod
    def _get_wrapped_function(cls, mod, name):
        return getattr(mod, name)
    def dump_setup(self, f):
        """Write the setup.py that builds the ufunc extension module."""
        setup = _ufunc_setup.substitute(module=self.module_name,
                                        filename=self.filename)
        f.write(setup)
    def dump_c(self, routines, f, prefix):
        """Write a C file with python wrappers
        This file contains all the definitions of the routines in c code.
        Arguments
        ---------
        routines
            List of Routine instances
        f
            File-like object to write the file to
        prefix
            The filename prefix, used to name the imported module.
        """
        functions = []
        function_creation = []
        ufunc_init = []
        module = self.module_name
        include_file = "\"{0}.h\"".format(prefix)
        top = _ufunc_top.substitute(include_file=include_file, module=module)
        for r_index, routine in enumerate(routines):
            name = routine.name
            # Partition the C function arguments into categories
            py_in, py_out = self._partition_args(routine.arguments)
            n_in = len(py_in)
            n_out = 1
            # Declare Args: in0..inN-1 plus a single out1 pointer.
            form = "char *{0}{1} = args[{2}];"
            arg_decs = [form.format('in', i, i) for i in range(n_in)]
            arg_decs.append(form.format('out', 1, n_in))
            declare_args = '\n    '.join(arg_decs)
            # Declare Steps (per-argument strides supplied by numpy).
            form = "npy_intp {0}{1}_step = steps[{2}];"
            step_decs = [form.format('in', i, i) for i in range(n_in)]
            step_decs.append(form.format('out', 1, n_in))
            declare_steps = '\n    '.join(step_decs)
            # Call Args
            form = "*(double *)in{0}"
            call_args = ', '.join([form.format(a) for a in range(n_in)])
            # Step Increments
            form = "{0}{1} += {0}{1}_step;"
            step_incs = [form.format('in', i) for i in range(n_in)]
            step_incs.append(form.format('out', 1))
            step_increments = '\n        '.join(step_incs)
            # Types: everything is treated as double (NPY_DOUBLE).
            n_types = n_in + n_out
            types = "{" + ', '.join(["NPY_DOUBLE"]*n_types) + "};"
            # Docstring
            docstring = '"Created in SymPy with Ufuncify"'
            # Function Creation
            function_creation.append("PyObject *ufunc{0};".format(r_index))
            # Ufunc initialization
            init_form = _ufunc_init_form.substitute(module=module,
                                                    funcname=name,
                                                    docstring=docstring,
                                                    n_in=n_in, n_out=n_out,
                                                    ind=r_index)
            ufunc_init.append(init_form)
            body = _ufunc_body.substitute(module=module, funcname=name,
                                          declare_args=declare_args,
                                          declare_steps=declare_steps,
                                          call_args=call_args,
                                          step_increments=step_increments,
                                          n_types=n_types, types=types)
            functions.append(body)
        body = '\n\n'.join(functions)
        ufunc_init = '\n    '.join(ufunc_init)
        function_creation = '\n    '.join(function_creation)
        bottom = _ufunc_bottom.substitute(module=module,
                                          ufunc_init=ufunc_init,
                                          function_creation=function_creation)
        text = [top, body, bottom]
        f.write('\n\n'.join(text))
    def _partition_args(self, args):
        """Group function arguments into categories."""
        py_in = []
        py_out = []
        for arg in args:
            if isinstance(arg, OutputArgument):
                # The generated inner loop writes a single output (out1), so
                # multiple outputs cannot be supported here.
                if py_out:
                    msg = "Ufuncify doesn't support multiple OutputArguments"
                    raise ValueError(msg)
                py_out.append(arg)
            elif isinstance(arg, InOutArgument):
                raise ValueError("Ufuncify doesn't support InOutArguments")
            else:
                py_in.append(arg)
        return py_in, py_out
@cacheit
@doctest_depends_on(exe=('f2py', 'gfortran', 'gcc'), modules=('numpy',))
def ufuncify(args, expr, language=None, backend='numpy', tempdir=None,
             flags=None, verbose=False, helpers=None):
    """Generates a binary function that supports broadcasting on numpy arrays.
    Parameters
    ----------
    args : iterable
        Either a Symbol or an iterable of symbols. Specifies the argument
        sequence for the function.
    expr
        A SymPy expression that defines the element wise operation.
    language : string, optional
        If supplied, (options: 'C' or 'F95'), specifies the language of the
        generated code. If ``None`` [default], the language is inferred based
        upon the specified backend.
    backend : string, optional
        Backend used to wrap the generated code. Either 'numpy' [default],
        'cython', or 'f2py'.
    tempdir : string, optional
        Path to directory for temporary files. If this argument is supplied,
        the generated code and the wrapper input files are left intact in the
        specified path.
    flags : iterable, optional
        Additional option flags that will be passed to the backend
    verbose : bool, optional
        If True, autowrap will not mute the command line backends. This can be
        helpful for debugging.
    helpers : iterable, optional
        Used to define auxillary expressions needed for the main expr. If the
        main expression needs to call a specialized function it should be put
        in the ``helpers`` iterable. Autowrap will then make sure that the
        compiled main expression can link to the helper routine. Items should
        be tuples with (<funtion_name>, <sympy_expression>, <arguments>). It
        is mandatory to supply an argument sequence to helper routines.
    Note
    ----
    The default backend ('numpy') will create actual instances of
    ``numpy.ufunc``. These support ndimensional broadcasting, and implicit type
    conversion. Use of the other backends will result in a "ufunc-like"
    function, which requires equal length 1-dimensional arrays for all
    arguments, and will not perform any type conversions.
    References
    ----------
    [1] http://docs.scipy.org/doc/numpy/reference/ufuncs.html
    Examples
    ========
    >>> from sympy.utilities.autowrap import ufuncify
    >>> from sympy.abc import x, y
    >>> import numpy as np
    >>> f = ufuncify((x, y), y + x**2)
    >>> type(f)
    numpy.ufunc
    >>> f([1, 2, 3], 2)
    array([  3.,   6.,  11.])
    >>> f(np.arange(5), 3)
    array([  3.,   4.,   7.,  12.,  19.])
    For the F2Py and Cython backends, inputs are required to be equal length
    1-dimensional arrays. The F2Py backend will perform type conversion, but
    the Cython backend will error if the inputs are not of the expected type.
    >>> f_fortran = ufuncify((x, y), y + x**2, backend='F2Py')
    >>> f_fortran(1, 2)
    3
    >>> f_fortran(numpy.array([1, 2, 3]), numpy.array([1.0, 2.0, 3.0]))
    array([2., 6., 12.])
    >>> f_cython = ufuncify((x, y), y + x**2, backend='Cython')
    >>> f_cython(1, 2)
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    TypeError: Argument '_x' has incorrect type (expected numpy.ndarray, got int)
    >>> f_cython(numpy.array([1.0]), numpy.array([2.0]))
    array([ 3.])
    """
    # Accept a bare Symbol as a one-argument signature.
    if isinstance(args, Symbol):
        args = (args,)
    else:
        args = tuple(args)
    if language:
        _validate_backend_language(backend, language)
    else:
        language = _infer_language(backend)
    helpers = helpers if helpers else ()
    flags = flags if flags else ()
    if backend.upper() == 'NUMPY':
        # Native ufunc path: build a real numpy.ufunc via UfuncifyCodeWrapper.
        routine = make_routine('autofunc', expr, args)
        helps = []
        for name, expr, args in helpers:
            helps.append(make_routine(name, expr, args))
        code_wrapper = UfuncifyCodeWrapper(CCodeGen("ufuncify"), tempdir,
                                           flags, verbose)
        return code_wrapper.wrap_code(routine, helpers=helps)
    else:
        # Dummies are used for all added expressions to prevent name clashes
        # within the original expression.
        y = IndexedBase(Dummy())
        m = Dummy(integer=True)
        i = Idx(Dummy(integer=True), m)
        f = implemented_function(Dummy().name, Lambda(args, expr))
        # For each of the args create an indexed version.
        indexed_args = [IndexedBase(Dummy(str(a))) for a in args]
        # Order the arguments (out, args, dim)
        args = [y] + indexed_args + [m]
        args_with_indices = [a[i] for a in indexed_args]
        # Wrap an element-wise assignment Eq(y[i], f(...)) so the generated
        # loop iterates over equal-length 1-d arrays.
        return autowrap(Eq(y[i], f(*args_with_indices)), language, backend,
                        tempdir, args, flags, verbose, helpers)
| bsd-3-clause |
pyprism/Hiren-Pass | hiren/settings.py | 2 | 7086 | """
Django settings for hiren project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import json
import datetime
import raven
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Load the local (uncommitted) config first, falling back to the checked-in
# config file. JSON_DATA supplies secrets used throughout this module.
try:
    with open(BASE_DIR + '/' + 'config.local.json') as f:
        JSON_DATA = json.load(f)
except FileNotFoundError:
    with open(BASE_DIR + '/' + 'config.json') as f:
        JSON_DATA = json.load(f)
SECRET_KEY = os.environ.get('SECRET_KEY', JSON_DATA['secret_key'])
# SECURITY WARNING: don't run with debug turned on in production!
# Environment values are strings, so parse them explicitly: the previous
# ``os.environ.get('DEBUG', False)`` made ANY non-empty value truthy,
# including the string "False".
DEBUG = os.environ.get('DEBUG', 'False').lower() in ('1', 'true', 'yes')
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'password',
    'rest_framework.authtoken',
    'corsheaders',
    'base'
]
# Sentry error reporting only in production builds.
if DEBUG is False:
    INSTALLED_APPS += [
        'raven.contrib.django.raven_compat',
    ]
# django-silk request profiling only during development.
if DEBUG:
    INSTALLED_APPS += [
        'silk'
    ]
# CorsMiddleware is listed first so CORS headers are applied before any
# response-generating middleware.
MIDDLEWARE_CLASSES = [
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
if DEBUG:
    MIDDLEWARE_CLASSES += [
        'silk.middleware.SilkyMiddleware',
    ]
ROOT_URLCONF = 'hiren.urls'
# Template engine also serves the frontend build output from bunny/build.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'bunny/build')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'hiren.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# Travis CI provides a passwordless local postgres; otherwise credentials
# come from the JSON config loaded above.
if 'TRAVIS' in os.environ:
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': 'travisci',
            'USER': 'postgres',
            'PASSWORD': '',
            'HOST': 'localhost',
            'PORT': '',
        }
    }
else:
    DATABASES = {
        'default': {
            'NAME': JSON_DATA['db_name'],
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'USER': JSON_DATA['db_user'],
            'PASSWORD': JSON_DATA['db_password'],
            'HOST': 'localhost',
            'PORT': '',
            # Keep connections open for up to 10 minutes.
            'CONN_MAX_AGE': 600,
            'atomic': True
        }
    }
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Dhaka'
USE_I18N = False
USE_L10N = False
USE_TZ = False
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'bunny/build/static'),
]
# custom user model
AUTH_USER_MODEL = 'base.Account'
# DRF
# JSON-only responses in production; the browsable API is added in DEBUG.
DEFAULT_RENDERER_CLASSES = (
    'rest_framework.renderers.JSONRenderer',
)
if DEBUG:
    DEFAULT_RENDERER_CLASSES = DEFAULT_RENDERER_CLASSES + (
        'rest_framework.renderers.BrowsableAPIRenderer',
    )
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.IsAuthenticated',
    ],
    'DEFAULT_RENDERER_CLASSES': DEFAULT_RENDERER_CLASSES,
    # 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
    # 'PAGE_SIZE': 20
}
# sentry.io -- only configured for production (non-DEBUG) runs.
if not DEBUG:
    RAVEN_CONFIG = {
        'dsn': JSON_DATA['sentry_dsn'],
        # If you are using git, you can also automatically configure the
        # release based on the git info.
        'release': raven.fetch_git_sha(os.path.dirname(os.pardir)),
    }
# logger
# Console + rotating debug file in DEBUG; rotating info file + admin email
# in production (selected via the require_debug_* filters).
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        },
        'require_debug_true': {
            '()': 'django.utils.log.RequireDebugTrue'
        }
    },
    'formatters': {
        'main_formatter': {
            'format': '%(levelname)s:%(name)s: %(message)s '
                      '(%(asctime)s; %(filename)s:%(lineno)d)',
            'datefmt': "%Y-%m-%d %H:%M:%S",
        },
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'console': {
            'level': 'DEBUG',
            'filters': ['require_debug_true'],
            'class': 'logging.StreamHandler',
            'formatter': 'main_formatter',
        },
        'production_file': {
            'level': 'INFO',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': BASE_DIR + '/logs/main.log',
            'maxBytes': 1024 * 1024 * 5,  # 5 MB
            'backupCount': 7,
            'formatter': 'main_formatter',
            'filters': ['require_debug_false'],
        },
        'debug_file': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': BASE_DIR + '/logs/main_debug.log',
            'maxBytes': 1024 * 1024 * 5,  # 5 MB
            'backupCount': 7,
            'formatter': 'main_formatter',
            'filters': ['require_debug_true'],
        },
        'null': {
            "class": 'logging.NullHandler',
        }
    },
    'loggers': {
        # 'django.db.backends': {  # enable sql log during development
        #     'level': 'DEBUG',
        #     'handlers': ['console'],
        # },
        'django.request': {
            'handlers': ['mail_admins', 'console'],
            'level': 'ERROR',
            'propagate': True,
        },
        'django': {
            'handlers': ['null', ],
        },
        'py.warnings': {
            'handlers': ['null', ],
        },
        '': {
            'handlers': ['console', 'production_file', 'debug_file'],
            'level': "DEBUG",
        },
    }
}
# CORS -- allow any origin, but only on API endpoints and only for GET/POST.
CORS_ORIGIN_ALLOW_ALL = True
CORS_URLS_REGEX = r'^/api/.*$'
CORS_ALLOW_METHODS = (
    'GET',
    'POST'
)
| mit |
scrazy77/p2pool | p2pool/util/skiplist.py | 278 | 2140 | from p2pool.util import math, memoize
class SkipList(object):
    """Probabilistic skip-list walk over a chain of items (Python 2 code).

    Subclasses supply the traversal primitives: initial_solution, judge,
    apply_delta, combine_deltas, previous, get_delta and (optionally)
    finalize. ``judge`` returns 0 when done, <0 to accept a jump and stop
    descending, >0 to keep jumping backwards.
    """
    def __init__(self, p=0.5):
        # p is the geometric parameter controlling expected skip heights.
        self.p = p
        # item -> (skip_length, [(previous_item, delta), ...])
        self.skips = {}
    def forget_item(self, item):
        # Drop any cached skip entry for item (e.g. when the chain changes).
        self.skips.pop(item, None)
    @memoize.memoize_with_backing(memoize.LRUDict(5))
    def __call__(self, start, *args):
        updates = {}
        pos = start
        sol = self.initial_solution(start, args)
        if self.judge(sol, args) == 0:
            return self.finalize(sol, args)
        while True:
            if pos not in self.skips:
                # Lazily create a level-1 skip entry with a random height.
                self.skips[pos] = math.geometric(self.p), [(self.previous(pos), self.get_delta(pos))]
            skip_length, skip = self.skips[pos]
            # fill previous updates
            for i in xrange(skip_length):
                if i in updates:
                    that_hash, delta = updates.pop(i)
                    x, y = self.skips[that_hash]
                    assert len(y) == i
                    y.append((pos, delta))
            # put desired skip nodes in updates
            for i in xrange(len(skip), skip_length):
                updates[i] = pos, None
            #if skip_length + 1 in updates:
            #    updates[skip_length + 1] = self.combine(updates[skip_length + 1], updates[skip_length])
            # Try the longest jumps first; judge() decides whether to take one.
            for jump, delta in reversed(skip):
                sol_if = self.apply_delta(sol, delta, args)
                decision = self.judge(sol_if, args)
                #print pos, sol, jump, delta, sol_if, decision
                if decision == 0:
                    return self.finalize(sol_if, args)
                elif decision < 0:
                    sol = sol_if
                    break
            else:
                raise AssertionError()
            sol = sol_if
            pos = jump
            # XXX could be better by combining updates
            for x in updates:
                updates[x] = updates[x][0], self.combine_deltas(updates[x][1], delta) if updates[x][1] is not None else delta
    def finalize(self, sol, args):
        # Default finalize is the identity; subclasses may post-process.
        return sol
| gpl-3.0 |
replicatorg/ReplicatorG | skein_engines/skeinforge-50/fabmetheus_utilities/geometry/geometry_utilities/evaluate.py | 12 | 68763 | """
Evaluate expressions.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.geometry.geometry_utilities.evaluate_elements import setting
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import settings
import math
import os
import sys
import traceback
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__credits__ = 'Art of Illusion <http://www.artofillusion.org/>'
__date__ = '$Date: 2008/02/05 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
globalModuleFunctionsDictionary = {}
def addPrefixDictionary(dictionary, keys, value):
    'Add prefixed key values to dictionary.'
    for prefixedKey in keys:
        # Strip any leading underscores before storing the key.
        strippedKey = prefixedKey.lstrip('_')
        dictionary[strippedKey] = value
def addQuoteWord(evaluatorWords, word):
    'Add quote word and remainder if the word starts with a quote character or dollar sign, otherwise add the word.'
    if len(word) < 2:
        evaluatorWords.append(word)
        return
    leadCharacter = word[0]
    if leadCharacter == '$':
        # Split a $name.suffix token into '$name' and '.suffix'.
        dotIndex = word.find('.', 1)
        if dotIndex > -1:
            evaluatorWords.append(word[: dotIndex])
            evaluatorWords.append(word[dotIndex :])
            return
    if leadCharacter not in ('"', "'"):
        evaluatorWords.append(word)
        return
    closingQuoteIndex = word.find(leadCharacter, 1)
    if closingQuoteIndex < 0 or closingQuoteIndex == len(word) - 1:
        # Unterminated quote, or the quote spans the whole word: keep intact.
        evaluatorWords.append(word)
        return
    splitIndex = closingQuoteIndex + 1
    evaluatorWords.append(word[: splitIndex])
    evaluatorWords.append(word[splitIndex :])
def addToPathsRecursively(paths, vector3Lists):
    'Add to vector3 paths recursively.'
    # A single point becomes its own one-element path.
    if vector3Lists.__class__ == Vector3 or vector3Lists.__class__.__name__ == 'Vector3Index':
        paths.append([vector3Lists])
        return
    currentPath = []
    for element in vector3Lists:
        if element.__class__ == list:
            addToPathsRecursively(paths, element)
        elif element.__class__ == Vector3:
            currentPath.append(element)
    if len(currentPath) > 0:
        paths.append(currentPath)
def addValueToEvaluatedDictionary(elementNode, evaluatedDictionary, key):
    'Get the evaluated dictionary.'
    value = getEvaluatedValueObliviously(elementNode, key)
    if value != None:
        evaluatedDictionary[key] = value
        return
    # Evaluation failed: record a warning entry with quotes blanked out.
    valueString = str(elementNode.attributes[key])
    print('Warning, addValueToEvaluatedDictionary in evaluate can not get a value for:')
    print(valueString)
    sanitizedString = valueString.replace('"', ' ').replace("'", ' ')
    evaluatedDictionary[key + '__Warning__'] = 'Can not evaluate: ' + sanitizedString
def addVector3ToElementNode(elementNode, key, vector3):
    'Add vector3 to xml element.'
    components = (vector3.x, vector3.y, vector3.z)
    elementNode.attributes[key] = '[%s,%s,%s]' % components
def compareExecutionOrderAscending(module, otherModule):
    'Get comparison in order to sort modules in ascending execution order.'
    order = module.globalExecutionOrder
    otherOrder = otherModule.globalExecutionOrder
    if order != otherOrder:
        return -1 if order < otherOrder else 1
    # Equal execution orders are broken by module name.
    name = module.__name__
    otherName = otherModule.__name__
    if name < otherName:
        return -1
    return int(name > otherName)
def convertToPaths(dictionary):
    'Recursively convert any ElementNodes to paths.'
    # Points are leaves; nothing to convert.
    if dictionary.__class__ == Vector3 or dictionary.__class__.__name__ == 'Vector3Index':
        return
    keys = getKeys(dictionary)
    if keys == None:
        return
    for key in keys:
        entry = dictionary[key]
        if entry.__class__.__name__ != 'ElementNode':
            convertToPaths(entry)
        elif entry.xmlObject != None:
            dictionary[key] = getFloatListListsByPaths(entry.xmlObject.getPaths())
def convertToTransformedPaths(dictionary):
    'Recursively convert any ElementNodes to paths.'
    # Points are leaves; nothing to convert.
    if dictionary.__class__ == Vector3 or dictionary.__class__.__name__ == 'Vector3Index':
        return
    keys = getKeys(dictionary)
    if keys == None:
        return
    for key in keys:
        entry = dictionary[key]
        if entry.__class__.__name__ != 'ElementNode':
            convertToTransformedPaths(entry)
        elif entry.xmlObject != None:
            dictionary[key] = entry.xmlObject.getTransformedPaths()
def executeLeftOperations( evaluators, operationLevel ):
    'Evaluate the expression value from the numeric and operation evaluators.'
    # Iterates by offset from the end; presumably this is meant to keep
    # positions valid while executeLeftOperation mutates the list -- confirm.
    for negativeIndex in xrange( - len(evaluators), - 1 ):
        evaluatorIndex = negativeIndex + len(evaluators)
        evaluators[evaluatorIndex].executeLeftOperation( evaluators, evaluatorIndex, operationLevel )
def executeNextEvaluatorArguments(evaluator, evaluators, evaluatorIndex, nextEvaluator):
    'Execute the nextEvaluator arguments.'
    function = evaluator.value
    if function == None:
        # Nothing callable; warn and leave the evaluator list untouched.
        print('Warning, executeNextEvaluatorArguments in evaluate can not get a evaluator.value for:')
        print(evaluatorIndex)
        print(evaluators)
        print(evaluator)
        return
    nextEvaluator.value = function(*nextEvaluator.arguments)
    del evaluators[evaluatorIndex]
def executePairOperations(evaluators, operationLevel):
    'Evaluate the expression value from the numeric and operation evaluators.'
    # Iterates by offset from the end; presumably this is meant to keep
    # positions valid while executePairOperation mutates the list -- confirm.
    for negativeIndex in xrange(1 - len(evaluators), - 1):
        evaluatorIndex = negativeIndex + len(evaluators)
        evaluators[evaluatorIndex].executePairOperation(evaluators, evaluatorIndex, operationLevel)
def getBracketEvaluators(bracketBeginIndex, bracketEndIndex, evaluators):
    'Get the bracket evaluators.'
    # Evaluate only what lies strictly between the bracket tokens.
    innerEvaluators = evaluators[bracketBeginIndex + 1 : bracketEndIndex]
    return getEvaluatedExpressionValueEvaluators(innerEvaluators)
def getBracketsExist(evaluators):
    'Evaluate the expression value.'
    # Scan for the innermost bracket pair: the begin index is overwritten by
    # each opener until the first closer is met.
    bracketBeginIndex = None
    for negativeIndex in xrange( - len(evaluators), 0 ):
        bracketEndIndex = negativeIndex + len(evaluators)
        evaluatorEnd = evaluators[ bracketEndIndex ]
        evaluatorWord = evaluatorEnd.word
        if evaluatorWord in ['(', '[', '{']:
            bracketBeginIndex = bracketEndIndex
        elif evaluatorWord in [')', ']', '}']:
            if bracketBeginIndex == None:
                # Unbalanced closer: warn, drop it, and return None.
                print('Warning, bracketBeginIndex in evaluateBrackets in evaluate is None.')
                print('This may be because the brackets are not balanced.')
                print(evaluators)
                del evaluators[ bracketEndIndex ]
                return
            evaluators[ bracketBeginIndex ].executeBracket(bracketBeginIndex, bracketEndIndex, evaluators)
            # Clear the word so this opener is not matched again.
            evaluators[ bracketBeginIndex ].word = None
            return True
    return False
def getBracketValuesDeleteEvaluator(bracketBeginIndex, bracketEndIndex, evaluators):
    'Get the bracket values and delete the evaluator.'
    innerEvaluators = getBracketEvaluators(bracketBeginIndex, bracketEndIndex, evaluators)
    bracketValues = [innerEvaluator.value for innerEvaluator in innerEvaluators]
    # Remove the evaluated span (everything after the opener up to and
    # including the closer).
    del evaluators[bracketBeginIndex + 1 : bracketEndIndex + 1]
    return bracketValues
def getCapitalizedSuffixKey(prefix, suffix):
 'Join prefix and suffix into a key, camelCasing the suffix unless the prefix is empty or dotted.'
 if prefix == '' or prefix.endswith('.'):
  return prefix + suffix
 capitalizedSuffix = suffix[:1].upper() + suffix[1:]
 return prefix + capitalizedSuffix
def getDictionarySplitWords(dictionary, value):
 'Split value into words, first padding every dictionary key with spaces; quoted values stay whole.'
 if getIsQuoted(value):
  return [value]
 for dictionaryKey in dictionary.keys():
  value = value.replace(dictionaryKey, ' ' + dictionaryKey + ' ')
 return value.split()
def getElementNodeByKey(elementNode, key):
 'Get the xml element linked under key, or None (with a warning) when absent or not an element.'
 if key not in elementNode.attributes:
  return None
 linkWord = str(elementNode.attributes[key]).strip()
 linkValue = getEvaluatedLinkValue(elementNode, linkWord)
 if linkValue.__class__.__name__ == 'ElementNode':
  return linkValue
 print('Warning, could not get ElementNode in getElementNodeByKey in evaluate for:')
 print(key)
 print(linkValue)
 print(elementNode)
 return None
def getElementNodeObject(evaluatedLinkValue):
 'Get the xmlObject behind an ElementNode link value, or None with a printed warning.'
 isElementNode = evaluatedLinkValue.__class__.__name__ == 'ElementNode'
 if isElementNode and evaluatedLinkValue.xmlObject != None:
  return evaluatedLinkValue.xmlObject
 if not isElementNode:
  print('Warning, could not get ElementNode in getElementNodeObject in evaluate for:')
  print(evaluatedLinkValue.__class__.__name__)
  print(evaluatedLinkValue)
 else:
  print('Warning, evaluatedLinkValue.xmlObject is None in getElementNodeObject in evaluate for:')
  print(evaluatedLinkValue)
 return None
def getElementNodesByKey(elementNode, key):
 'Get the xml elements linked under key as a list; empty list when absent or invalid.'
 if key not in elementNode.attributes:
  return []
 linkWord = str(elementNode.attributes[key]).strip()
 linkValue = getEvaluatedLinkValue(elementNode, linkWord)
 if linkValue.__class__.__name__ == 'ElementNode':
  return [linkValue]
 if linkValue.__class__ == list:
  return linkValue
 print('Warning, could not get ElementNodes in getElementNodesByKey in evaluate for:')
 print(key)
 print(linkValue)
 print(elementNode)
 return []
def getEndIndexConvertEquationValue( bracketEndIndex, evaluatorIndex, evaluators ):
 'Get the bracket end index and convert the equation value evaluators into a string.'
 # Only applies to an "equation.<name> : <tokens>" pair inside a dictionary:
 # the raw tokens up to the next ',' or '}' are fused back into one
 # EvaluatorValue string so the equation text survives tokenization.
 evaluator = evaluators[evaluatorIndex]
 if evaluator.__class__ != EvaluatorValue:
  return bracketEndIndex
 if not evaluator.word.startswith('equation.'):
  return bracketEndIndex
 if evaluators[ evaluatorIndex + 1 ].word != ':':
  return bracketEndIndex
 valueBeginIndex = evaluatorIndex + 2
 equationValueString = ''
 for valueEvaluatorIndex in xrange( valueBeginIndex, len(evaluators) ):
  valueEvaluator = evaluators[ valueEvaluatorIndex ]
  if valueEvaluator.word == ',' or valueEvaluator.word == '}':
   if equationValueString == '':
    return bracketEndIndex
   else:
    # Replace the consumed tokens with the single fused string and shift
    # the reported bracket end index left by the number of deleted tokens.
    evaluators[ valueBeginIndex ] = EvaluatorValue( equationValueString )
    valueDeleteIndex = valueBeginIndex + 1
    del evaluators[ valueDeleteIndex : valueEvaluatorIndex ]
    return bracketEndIndex - valueEvaluatorIndex + valueDeleteIndex
  equationValueString += valueEvaluator.word
 return bracketEndIndex
def getEvaluatedBoolean(defaultValue, elementNode, key):
 'Get the evaluated boolean attribute, or defaultValue when missing.'
 if elementNode != None and key in elementNode.attributes:
  return euclidean.getBooleanFromValue(getEvaluatedValueObliviously(elementNode, key))
 return defaultValue
def getEvaluatedDictionaryByCopyKeys(copyKeys, elementNode):
 'Get the attribute dictionary, copying copyKeys verbatim and evaluating all other keys.'
 evaluatedDictionary = {}
 for key, attributeValue in elementNode.attributes.items():
  if key in copyKeys:
   evaluatedDictionary[key] = attributeValue
  else:
   addValueToEvaluatedDictionary(elementNode, evaluatedDictionary, key)
 return evaluatedDictionary
def getEvaluatedDictionaryByEvaluationKeys(elementNode, evaluationKeys):
 'Get the evaluated dictionary restricted to the attributes named in evaluationKeys.'
 evaluatedDictionary = {}
 for attributeKey in elementNode.attributes.keys():
  if attributeKey in evaluationKeys:
   addValueToEvaluatedDictionary(elementNode, evaluatedDictionary, attributeKey)
 return evaluatedDictionary
def getEvaluatedExpressionValue(elementNode, value):
 'Evaluate the expression value, returning None (after a printed warning) on any error.'
 try:
  return getEvaluatedExpressionValueBySplitLine(elementNode, getEvaluatorSplitWords(value))
 except:
  # Deliberate catch-all: a malformed expression should warn and yield None,
  # not abort processing; the traceback is printed for diagnosis.
  print('Warning, in getEvaluatedExpressionValue in evaluate could not get a value for:')
  print(value)
  traceback.print_exc(file=sys.stdout)
  return None
def getEvaluatedExpressionValueBySplitLine(elementNode, words):
 'Evaluate the expression value from the already-tokenized words.'
 evaluators = []
 for wordIndex, word in enumerate(words):
  # The lookahead word lets getEvaluator disambiguate tokens; '' at the end.
  nextWord = ''
  nextWordIndex = wordIndex + 1
  if nextWordIndex < len(words):
   nextWord = words[nextWordIndex]
  evaluator = getEvaluator(elementNode, evaluators, nextWord, word)
  if evaluator != None:
   evaluators.append(evaluator)
 # Collapse bracket pairs innermost-first until none remain.
 while getBracketsExist(evaluators):
  pass
 evaluatedExpressionValueEvaluators = getEvaluatedExpressionValueEvaluators(evaluators)
 if len( evaluatedExpressionValueEvaluators ) > 0:
  return evaluatedExpressionValueEvaluators[0].value
 return None
def getEvaluatedExpressionValueEvaluators(evaluators):
 'Evaluate the expression value from the numeric and operation evaluators, in precedence order.'
 # Center operations (e.g. value fetches) first, index-stable forward pass.
 for evaluatorIndex, evaluator in enumerate(evaluators):
  evaluator.executeCenterOperation(evaluators, evaluatorIndex)
 # Right-acting operations, walked with negative offsets so deletions are safe.
 for negativeIndex in xrange(1 - len(evaluators), 0):
  evaluatorIndex = negativeIndex + len(evaluators)
  evaluators[evaluatorIndex].executeRightOperation(evaluators, evaluatorIndex)
 executeLeftOperations(evaluators, 200)
 # Descending precedence levels for the binary operators.
 for operationLevel in [80, 60, 40, 20, 15]:
  executePairOperations(evaluators, operationLevel)
 executeLeftOperations(evaluators, 13)
 executePairOperations(evaluators, 12)
 # Level 10 then level 0 pair operations over the full range.
 for negativeIndex in xrange(-len(evaluators), 0):
  evaluatorIndex = negativeIndex + len(evaluators)
  evaluators[evaluatorIndex].executePairOperation(evaluators, evaluatorIndex, 10)
 # Level 0 runs right-to-left by explicit reversed indices.
 for evaluatorIndex in xrange(len(evaluators) - 1, -1, -1):
  evaluators[evaluatorIndex].executePairOperation(evaluators, evaluatorIndex, 0)
 return evaluators
def getEvaluatedFloat(defaultValue, elementNode, key):
 'Get the evaluated float attribute, or defaultValue when missing.'
 if elementNode != None and key in elementNode.attributes:
  return euclidean.getFloatFromValue(getEvaluatedValueObliviously(elementNode, key))
 return defaultValue
def getEvaluatedInt(defaultValue, elementNode, key):
 'Get the evaluated int attribute, or defaultValue when missing or unparseable.'
 if elementNode == None:
  # Bug fix: previously returned None here, ignoring defaultValue and
  # disagreeing with the sibling getEvaluatedFloat/String/Value helpers.
  return defaultValue
 if key in elementNode.attributes:
  try:
   return getIntFromFloatString(getEvaluatedValueObliviously(elementNode, key))
  except:
   # Best-effort: warn and fall through to the default on a bad value.
   print('Warning, could not evaluate the int.')
   print(key)
   print(elementNode.attributes[key])
 return defaultValue
def getEvaluatedIntByKeys(defaultValue, elementNode, keys):
 'Fold getEvaluatedInt over the keys; later keys override earlier results.'
 evaluatedValue = defaultValue
 for key in keys:
  evaluatedValue = getEvaluatedInt(evaluatedValue, elementNode, key)
 return evaluatedValue
def getEvaluatedLinkValue(elementNode, word):
 'Evaluate word when it looks like an expression; otherwise return it unchanged.'
 if word == '':
  return ''
 if not getStartsWithCurlyEqualRoundSquare(word):
  return word
 return getEvaluatedExpressionValue(elementNode, word)
def getEvaluatedString(defaultValue, elementNode, key):
 'Get the evaluated string attribute, or defaultValue when missing.'
 if elementNode != None and key in elementNode.attributes:
  return str(getEvaluatedValueObliviously(elementNode, key))
 return defaultValue
def getEvaluatedValue(defaultValue, elementNode, key):
 'Get the evaluated attribute value, or defaultValue when missing.'
 if elementNode != None and key in elementNode.attributes:
  return getEvaluatedValueObliviously(elementNode, key)
 return defaultValue
def getEvaluatedValueObliviously(elementNode, key):
 'Evaluate the attribute at key; identifier-like keys (id, name, tags) are returned raw.'
 value = str(elementNode.attributes[key]).strip()
 if key in ('id', 'name', 'tags'):
  return value
 return getEvaluatedLinkValue(elementNode, value)
def getEvaluator(elementNode, evaluators, nextWord, word):
 'Get the evaluator for a token; dispatch order is significant.'
 # 1. Operator / keyword tokens registered in the global split dictionary.
 if word in globalSplitDictionary:
  return globalSplitDictionary[word](elementNode, word)
 firstCharacter = word[: 1]
 # 2. Quoted string literal (matching quote at both ends).
 if firstCharacter == "'" or firstCharacter == '"':
  if len(word) > 1:
   if firstCharacter == word[-1]:
    return EvaluatorValue(word[1 : -1])
 # 3. '$'-prefixed token: verbatim value with the '$' stripped.
 if firstCharacter == '$':
  return EvaluatorValue(word[1 :])
 dotIndex = word.find('.')
 functions = elementNode.getXMLProcessor().functions
 if dotIndex > -1 and len(word) > 1:
  # 4a. Leading-dot attribute access, e.g. '.x'.
  if dotIndex == 0 and word[1].isalpha():
   return EvaluatorAttribute(elementNode, word)
  if dotIndex > 0:
   # 4b. 'module.member' against the registered module evaluators.
   untilDot = word[: dotIndex]
   if untilDot in globalModuleEvaluatorDictionary:
    return globalModuleEvaluatorDictionary[untilDot](elementNode, word)
   # 4c. Dotted access on a local variable of the innermost function.
   if len(functions) > 0:
    if untilDot in functions[-1].localDictionary:
     return EvaluatorLocal(elementNode, word)
 # 5. Bare identifier: local variable, then class/function element by id,
 #    otherwise treated as a plain string value.
 if firstCharacter.isalpha() or firstCharacter == '_':
  if len(functions) > 0:
   if word in functions[-1].localDictionary:
    return EvaluatorLocal(elementNode, word)
  wordElement = elementNode.getElementNodeByID(word)
  if wordElement != None:
   if wordElement.getNodeName() == 'class':
    return EvaluatorClass(wordElement, word)
   if wordElement.getNodeName() == 'function':
    return EvaluatorFunction(wordElement, word)
  return EvaluatorValue(word)
 # 6. Anything else is handled as a numeric token.
 return EvaluatorNumeric(elementNode, word)
def getEvaluatorSplitWords(value):
 'Tokenize an expression string into evaluator words, protecting quoted substrings.'
 if value.startswith('='):
  value = value[len('=') :]
 if len(value) < 1:
  return []
 global globalDictionaryOperatorBegin
 # Phase 1: replace each quoted substring with a unique placeholder token so
 # the operator splitting below cannot break inside quotes.
 uniqueQuoteIndex = 0
 word = ''
 quoteString = None
 quoteDictionary = {}
 for characterIndex in xrange(len(value)):
  character = value[characterIndex]
  if character == '"' or character == "'":
   if quoteString == None:
    quoteString = ''
   elif quoteString != None:
    if character == quoteString[: 1]:
     uniqueQuoteIndex = getUniqueQuoteIndex(uniqueQuoteIndex, value)
     uniqueToken = getTokenByNumber(uniqueQuoteIndex)
     quoteDictionary[uniqueToken] = quoteString + character
     character = uniqueToken
     quoteString = None
  if quoteString == None:
   word += character
  else:
   quoteString += character
 # Phase 2: split on multi-character operators first, then on the single
 # character operators, so e.g. '**' is not split into two '*' tokens.
 beginSplitWords = getDictionarySplitWords(globalDictionaryOperatorBegin, word)
 global globalSplitDictionaryOperator
 evaluatorSplitWords = []
 for beginSplitWord in beginSplitWords:
  if beginSplitWord in globalDictionaryOperatorBegin:
   evaluatorSplitWords.append(beginSplitWord)
  else:
   evaluatorSplitWords += getDictionarySplitWords(globalSplitDictionaryOperator, beginSplitWord)
 # Phase 3: restore the original quoted substrings from the placeholders.
 for evaluatorSplitWordIndex, evaluatorSplitWord in enumerate(evaluatorSplitWords):
  for quoteDictionaryKey in quoteDictionary.keys():
   if quoteDictionaryKey in evaluatorSplitWord:
    evaluatorSplitWords[evaluatorSplitWordIndex] = evaluatorSplitWord.replace(quoteDictionaryKey, quoteDictionary[quoteDictionaryKey])
 evaluatorTransitionWords = []
 for evaluatorSplitWord in evaluatorSplitWords:
  addQuoteWord(evaluatorTransitionWords, evaluatorSplitWord)
 return evaluatorTransitionWords
def getFloatListFromBracketedString(bracketedString):
 'Get the list of floats parsed from a bracketed string, or None when unbracketed.'
 if not getIsBracketed(bracketedString):
  return None
 strippedString = bracketedString.strip()
 for bracketCharacter in '[]()':
  strippedString = strippedString.replace(bracketCharacter, '')
 if strippedString == '':
  return []
 floatList = []
 for word in strippedString.split(','):
  evaluatedFloat = euclidean.getFloatFromValue(word)
  if evaluatedFloat != None:
   floatList.append(evaluatedFloat)
 return floatList
def getFloatListListsByPaths(paths):
 'Get the list of float-list lists, one inner list per path.'
 floatListLists = []
 for path in paths:
  floatListList = []
  for point in path:
   floatListList.append(point.getFloatList())
  # Bug fix: the inner list was never appended and the function returned
  # inside the loop, so it always yielded an empty (or truncated) result.
  floatListLists.append(floatListList)
 return floatListLists
def getIntFromFloatString(value):
 'Get the int parsed from the value; floats are rounded, empty strings give None.'
 floatString = str(value).strip()
 if floatString == '':
  return None
 if '.' not in floatString:
  return int(value)
 return int(round(float(floatString)))
def getIsBracketed(word):
 'Determine if the word is wrapped in matching round or square brackets.'
 if len(word) < 2:
  return False
 endPair = word[0] + word[-1]
 return endPair == '()' or endPair == '[]'
def getIsQuoted(word):
 'Determine if the word is wrapped in matching single or double quotes.'
 if len(word) < 2:
  return False
 endPair = word[0] + word[-1]
 return endPair == '""' or endPair == "''"
def getKeys(repository):
 'Get the iteration keys for a list, tuple or dict repository; None otherwise.'
 repositoryClass = repository.__class__
 if repositoryClass == list or repositoryClass == tuple:
  return range(len(repository))
 if repositoryClass != dict:
  return None
 return repository.keys()
def getLocalAttributeValueString(key, valueString):
 'Expand an augmented assignment like "+=3" into "key+3"; other strings pass through.'
 for augmentedStatement in ('+=', '-=', '*=', '/=', '%=', '**='):
  if valueString.startswith(augmentedStatement):
   operatorText = augmentedStatement[: -1]
   return key + operatorText + valueString[len(augmentedStatement) :]
 return valueString
def getMatchingPlugins(elementNode, namePathDictionary):
 'Get the plugin modules whose names appear (dotted) in the attribute dictionary.'
 matchingPlugins = []
 remainingNamePaths = namePathDictionary.copy()
 for key in elementNode.attributes:
  dotIndex = key.find('.')
  if dotIndex < 0:
   continue
  keyUntilDot = key[: dotIndex]
  if keyUntilDot not in remainingNamePaths:
   continue
  pluginModule = archive.getModuleWithPath(remainingNamePaths[keyUntilDot])
  # Each plugin is loaded at most once, even if several attributes match it.
  del remainingNamePaths[keyUntilDot]
  if pluginModule != None:
   matchingPlugins.append(pluginModule)
 return matchingPlugins
def getNextChildIndex(elementNode):
 'Get the index just after elementNode among its parent childNodes.'
 siblings = elementNode.parentNode.childNodes
 for siblingIndex, sibling in enumerate(siblings):
  if sibling == elementNode:
   return siblingIndex + 1
 return len(siblings)
def getPathByKey(defaultPath, elementNode, key):
 'Get the path linked under key, or defaultPath when missing or unresolvable.'
 if key not in elementNode.attributes:
  return defaultPath
 linkWord = str(elementNode.attributes[key]).strip()
 linkValue = getEvaluatedLinkValue(elementNode, linkWord)
 if linkValue.__class__ == list:
  return getPathByList(linkValue)
 linkObject = getElementNodeObject(linkValue)
 if linkObject == None:
  return defaultPath
 return linkObject.getPaths()[0]
def getPathByList(vertexList):
 'Get the path (list of Vector3) built from a list of float lists.'
 if len(vertexList) < 1:
  # NOTE(review): an empty input returns a bare Vector3, not a list —
  # kept as-is since callers may rely on it; confirm before changing.
  return Vector3()
 if vertexList[0].__class__ != list:
  # A single flat float list is treated as a one-point path.
  vertexList = [vertexList]
 path = []
 for floatList in vertexList:
  vector3 = getVector3ByFloatList(floatList, Vector3())
  path.append(vector3)
 return path
def getPathByPrefix(elementNode, path, prefix):
 'Get the path overridden by the prefix attributes of the xml element.'
 if len(path) < 2:
  print('Warning, bug, path is too small in evaluate in setPathByPrefix.')
  return
 # A shorter override replaces the leading points in place; an equal-or-longer
 # override replaces the whole path.
 pathByKey = getPathByKey([], elementNode, getCapitalizedSuffixKey(prefix, 'path'))
 if len( pathByKey ) < len(path):
  for pointIndex in xrange( len( pathByKey ) ):
   path[pointIndex] = pathByKey[pointIndex]
 else:
  path = pathByKey
 # Endpoint overrides, e.g. '<prefix>PathStart' / '<prefix>PathEnd'.
 path[0] = getVector3ByPrefix(path[0], elementNode, getCapitalizedSuffixKey(prefix, 'pathStart'))
 path[-1] = getVector3ByPrefix(path[-1], elementNode, getCapitalizedSuffixKey(prefix, 'pathEnd'))
 return path
def getPathsByKey(defaultPaths, elementNode, key):
 'Get the paths linked under key, or defaultPaths when missing or unresolvable.'
 if key not in elementNode.attributes:
  return defaultPaths
 linkWord = str(elementNode.attributes[key]).strip()
 linkValue = getEvaluatedLinkValue(elementNode, linkWord)
 linkValueClass = linkValue.__class__
 if linkValueClass == dict or linkValueClass == list:
  convertToPaths(linkValue)
  return getPathsByLists(linkValue)
 linkObject = getElementNodeObject(linkValue)
 if linkObject == None:
  return defaultPaths
 return linkObject.getPaths()
def getPathsByLists(vertexLists):
 'Get the paths built recursively from nested vertex lists.'
 paths = []
 addToPathsRecursively(paths, getVector3ListsRecursively(vertexLists))
 return paths
def getRadiusArealizedBasedOnAreaRadius(elementNode, radius, sides):
 'Get the areal radius from the radius, number of sides and cascade radiusAreal.'
 if elementNode.getCascadeBoolean(False, 'radiusAreal'):
  return radius
 return euclidean.getRadiusArealizedMultiplier(sides) * radius
def getSidesBasedOnPrecision(elementNode, radius):
 'Get the number of polygon sides implied by the precision setting.'
 precision = setting.getPrecision(elementNode)
 return int(math.ceil(math.pi * math.sqrt(0.5 * radius / precision)))
def getSidesMinimumThreeBasedOnPrecision(elementNode, radius):
 'Get the precision-based number of polygon sides, never fewer than three.'
 return max(3, getSidesBasedOnPrecision(elementNode, radius))
def getSidesMinimumThreeBasedOnPrecisionSides(elementNode, radius):
 'Get the number of polygon sides (minimum three), overridable by the sides attribute.'
 defaultSides = getSidesMinimumThreeBasedOnPrecision(elementNode, radius)
 return getEvaluatedFloat(defaultSides, elementNode, 'sides')
def getSplitDictionary():
 'Build the token-to-evaluator-class split dictionary from the operator registries plus keyword literals.'
 global globalSplitDictionaryOperator
 splitDictionary = globalSplitDictionaryOperator.copy()
 global globalDictionaryOperatorBegin
 splitDictionary.update( globalDictionaryOperatorBegin )
 # Keyword literals accepted in both lowercase and Python-style capitalization.
 splitDictionary['and'] = EvaluatorAnd
 splitDictionary['false'] = EvaluatorFalse
 splitDictionary['False'] = EvaluatorFalse
 splitDictionary['or'] = EvaluatorOr
 splitDictionary['not'] = EvaluatorNot
 splitDictionary['true'] = EvaluatorTrue
 splitDictionary['True'] = EvaluatorTrue
 splitDictionary['none'] = EvaluatorNone
 splitDictionary['None'] = EvaluatorNone
 return splitDictionary
def getStartsWithCurlyEqualRoundSquare(word):
 'Determine if the word starts with a curly bracket, equals sign, round or square bracket.'
 return word.startswith(('{', '=', '(', '['))
def getTokenByNumber(number):
 'Get the placeholder token for a number, underscore-delimited.'
 return '_' + str(number) + '_'
def getTransformedPathByKey(defaultTransformedPath, elementNode, key):
 'Get the transformed path linked under key, or defaultTransformedPath when missing or unresolvable.'
 if key not in elementNode.attributes:
  return defaultTransformedPath
 value = elementNode.attributes[key]
 if value.__class__ == list:
  return value
 word = str(value).strip()
 evaluatedLinkValue = getEvaluatedLinkValue(elementNode, word)
 if evaluatedLinkValue.__class__ == list:
  return getPathByList(evaluatedLinkValue)
 # Bug fix: this previously passed the undefined name 'evaluatedLinkValueClass',
 # raising a NameError whenever this branch was reached.
 elementNodeObject = getElementNodeObject(evaluatedLinkValue)
 if elementNodeObject == None:
  return defaultTransformedPath
 return elementNodeObject.getTransformedPaths()[0]
def getTransformedPathByPrefix(elementNode, path, prefix):
 'Get the transformed path overridden by the prefix attributes of the xml element.'
 if len(path) < 2:
  print('Warning, bug, path is too small in evaluate in setPathByPrefix.')
  return
 # Mirrors getPathByPrefix: a shorter override replaces leading points in
 # place, an equal-or-longer override replaces the whole path.
 pathByKey = getTransformedPathByKey([], elementNode, getCapitalizedSuffixKey(prefix, 'path'))
 if len( pathByKey ) < len(path):
  for pointIndex in xrange( len( pathByKey ) ):
   path[pointIndex] = pathByKey[pointIndex]
 else:
  path = pathByKey
 # Endpoint overrides, e.g. '<prefix>PathStart' / '<prefix>PathEnd'.
 path[0] = getVector3ByPrefix(path[0], elementNode, getCapitalizedSuffixKey(prefix, 'pathStart'))
 path[-1] = getVector3ByPrefix(path[-1], elementNode, getCapitalizedSuffixKey(prefix, 'pathEnd'))
 return path
def getTransformedPathsByKey(defaultTransformedPaths, elementNode, key):
 'Get the transformed paths linked under key, or defaultTransformedPaths when missing.'
 if key not in elementNode.attributes:
  return defaultTransformedPaths
 attributeValue = elementNode.attributes[key]
 if attributeValue.__class__ == list:
  return getPathsByLists(attributeValue)
 linkWord = str(attributeValue).strip()
 linkValue = getEvaluatedLinkValue(elementNode, linkWord)
 linkValueClass = linkValue.__class__
 if linkValueClass == dict or linkValueClass == list:
  convertToTransformedPaths(linkValue)
  return getPathsByLists(linkValue)
 linkObject = getElementNodeObject(linkValue)
 if linkObject == None:
  return defaultTransformedPaths
 return linkObject.getTransformedPaths()
def getUniqueQuoteIndex( uniqueQuoteIndex, word ):
 'Get the next quote index whose token does not already occur in word.'
 candidateIndex = uniqueQuoteIndex + 1
 while getTokenByNumber(candidateIndex) in word:
  candidateIndex += 1
 return candidateIndex
def getUniqueToken(word):
 'Get a short token that does not occur anywhere in word.'
 candidateCharacters = '@#!'
 # Try the bare marker characters first.
 for candidate in candidateCharacters:
  if candidate not in word:
   return candidate
 # Otherwise append an increasing number until a free token is found.
 suffixNumber = 0
 while True:
  for candidate in candidateCharacters:
   numberedToken = candidate + str(suffixNumber)
   if numberedToken not in word:
    return numberedToken
  suffixNumber += 1
def getVector3ByDictionary( dictionary, vector3 ):
 "Get vector3 updated from the dictionary's x, y and z entries."
 for axisKey in 'xyz':
  if axisKey in dictionary:
   vector3 = getVector3IfNone(vector3)
   setattr(vector3, axisKey, euclidean.getFloatFromValue(dictionary[axisKey]))
 return vector3
def getVector3ByDictionaryListValue(value, vector3):
 'Get vector3 from a dictionary, list, scalar, or pass through an existing vector.'
 valueClass = value.__class__
 if valueClass == Vector3 or valueClass.__name__ == 'Vector3Index':
  return value
 if valueClass == dict:
  return getVector3ByDictionary(value, vector3)
 if valueClass == list:
  return getVector3ByFloatList(value, vector3)
 # A bare scalar sets all three axes to the same value.
 floatFromValue = euclidean.getFloatFromValue(value)
 if floatFromValue == None:
  return vector3
 vector3.setToXYZ(floatFromValue, floatFromValue, floatFromValue)
 return vector3
def getVector3ByFloatList(floatList, vector3):
 'Get vector3 updated from up to three floats in the list.'
 for axisIndex, axisKey in enumerate('xyz'):
  if len(floatList) > axisIndex:
   vector3 = getVector3IfNone(vector3)
   setattr(vector3, axisKey, euclidean.getFloatFromValue(floatList[axisIndex]))
 return vector3
def getVector3ByMultiplierPrefix( elementNode, multiplier, prefix, vector3 ):
 'Get vector3 from multiplier, prefix and xml element; unchanged when no override applies.'
 if multiplier == 0.0:
  return vector3
 multipliedVector3 = vector3 * multiplier
 overriddenVector3 = getVector3ByPrefix(multipliedVector3.copy(), elementNode, prefix)
 if overriddenVector3 == multipliedVector3:
  return vector3
 # Undo the multiplier on the overridden value.
 return overriddenVector3 / multiplier
def getVector3ByMultiplierPrefixes( elementNode, multiplier, prefixes, vector3 ):
 'Fold getVector3ByMultiplierPrefix over the prefixes.'
 currentVector3 = vector3
 for prefix in prefixes:
  currentVector3 = getVector3ByMultiplierPrefix(elementNode, multiplier, prefix, currentVector3)
 return currentVector3
def getVector3ByPrefix(defaultVector3, elementNode, prefix):
 'Get vector3 from the prefix attribute and the per-axis prefix.x/.y/.z attributes.'
 value = getEvaluatedValue(None, elementNode, prefix)
 if value != None:
  defaultVector3 = getVector3ByDictionaryListValue(value, defaultVector3)
 prefix = archive.getUntilDot(prefix)
 for axisKey in 'xyz':
  axisValue = getEvaluatedFloat(None, elementNode, prefix + '.' + axisKey)
  if axisValue != None:
   defaultVector3 = getVector3IfNone(defaultVector3)
   setattr(defaultVector3, axisKey, axisValue)
 return defaultVector3
def getVector3ByPrefixes( elementNode, prefixes, vector3 ):
 'Fold getVector3ByPrefix over the prefixes.'
 currentVector3 = vector3
 for prefix in prefixes:
  currentVector3 = getVector3ByPrefix(currentVector3, elementNode, prefix)
 return currentVector3
def getVector3FromElementNode(elementNode):
 'Get vector3 from the x, y, z attributes, then apply any cartesian override.'
 axisVector3 = Vector3(
  getEvaluatedFloat(0.0, elementNode, 'x'),
  getEvaluatedFloat(0.0, elementNode, 'y'),
  getEvaluatedFloat(0.0, elementNode, 'z'))
 return getVector3ByPrefix(axisVector3, elementNode, 'cartesian')
def getVector3IfNone(vector3):
 'Get a new Vector3 when the given vector3 is None; otherwise the original.'
 if vector3 != None:
  return vector3
 return Vector3()
def getVector3ListsRecursively(floatLists):
 'Convert nested float lists into the same nesting of Vector3 objects.'
 if len(floatLists) < 1:
  # NOTE(review): an empty input returns a bare Vector3, not a list —
  # kept as-is since callers may rely on it; confirm before changing.
  return Vector3()
 firstElement = floatLists[0]
 if firstElement.__class__ == Vector3:
  # Already converted.
  return floatLists
 if firstElement.__class__ != list:
  # A flat float list becomes a single Vector3.
  return getVector3ByFloatList(floatLists, Vector3())
 vector3ListsRecursively = []
 for floatList in floatLists:
  vector3ListsRecursively.append(getVector3ListsRecursively(floatList))
 return vector3ListsRecursively
def getVisibleObjects(archivableObjects):
 'Get the subset of archivable objects whose getVisible() is true.'
 return [archivableObject for archivableObject in archivableObjects if archivableObject.getVisible()]
def processArchivable(archivableClass, elementNode):
 'Attach a new archivable to the element and process its child nodes.'
 if elementNode == None:
  return
 archivable = archivableClass()
 elementNode.xmlObject = archivable
 archivable.setToElementNode(elementNode)
 elementNode.getXMLProcessor().processChildNodes(elementNode)
def processCondition(elementNode):
 'Process the xml element condition: run the children when the condition is true, else the else-branch.'
 xmlProcessor = elementNode.getXMLProcessor()
 if elementNode.xmlObject == None:
  # Lazily parse and cache the condition split words on first visit.
  elementNode.xmlObject = ModuleElementNode(elementNode)
 if elementNode.xmlObject.conditionSplitWords == None:
  return
 if len(xmlProcessor.functions ) < 1:
  # Conditions are only valid inside a function scope.
  print('Warning, the (in) element is not in a function in processCondition in evaluate for:')
  print(elementNode)
  return
 # The condition value is truthy when its int evaluation is positive.
 if int(getEvaluatedExpressionValueBySplitLine(elementNode, elementNode.xmlObject.conditionSplitWords)) > 0:
  xmlProcessor.functions[-1].processChildNodes(elementNode)
 else:
  elementNode.xmlObject.processElse(elementNode)
def removeIdentifiersFromDictionary(dictionary):
 'Remove the identifier entries (id, name, tags) from a dictionary, returning it.'
 identifierKeys = ['id', 'name', 'tags']
 euclidean.removeElementsFromDictionary(dictionary, identifierKeys)
 return dictionary
def setAttributesByArguments(argumentNames, arguments, elementNode):
 'Set the attribute dictionary entries from the positional arguments.'
 for argumentIndex, argument in enumerate(arguments):
  argumentName = argumentNames[argumentIndex]
  elementNode.attributes[argumentName] = argument
def setFunctionLocalDictionary(arguments, function):
 "Populate the function's local dictionary from the call arguments and declared parameters."
 # All raw arguments are always reachable under '_arguments'.
 function.localDictionary = {'_arguments' : arguments}
 if len(arguments) > 0:
  firstArgument = arguments[0]
  if firstArgument.__class__ == dict:
   # A single dict argument becomes the whole local dictionary.
   function.localDictionary = firstArgument
   return
 if 'parameters' not in function.elementNode.attributes:
  return
 parameters = function.elementNode.attributes['parameters'].strip()
 if parameters == '':
  return
 # Bind positional arguments to declared parameter names; remaining
 # parameters fall back to their 'name=default' defaults.
 parameterWords = parameters.split(',')
 for parameterWordIndex, parameterWord in enumerate(parameterWords):
  strippedWord = parameterWord.strip()
  keyValue = KeyValue().getByEqual(strippedWord)
  if parameterWordIndex < len(arguments):
   function.localDictionary[keyValue.key] = arguments[parameterWordIndex]
  else:
   strippedValue = keyValue.value
   if strippedValue == None:
    print('Warning there is no default parameter in getParameterValue for:')
    print(strippedWord)
    print(parameterWords)
    print(arguments)
    print(function.elementNode.attributes)
   else:
    strippedValue = strippedValue.strip()
   function.localDictionary[keyValue.key.strip()] = strippedValue
 if len(arguments) > len(parameterWords):
  print('Warning there are too many initializeFunction parameters for:')
  print(function.elementNode.attributes)
  print(parameterWords)
  print(arguments)
def setLocalAttribute(elementNode):
 'Cache the first identifier-like attribute of the element as a KeyValue of key and split words.'
 if elementNode.xmlObject != None:
  # Already parsed on a previous visit.
  return
 for key in elementNode.attributes:
  # Only the first attribute whose key starts with a letter is used.
  if key[: 1].isalpha():
   value = getEvaluatorSplitWords(getLocalAttributeValueString(key, elementNode.attributes[key].strip()))
   elementNode.xmlObject = KeyValue(key, value)
   return
 # No assignable attribute found: cache an empty KeyValue so the scan is not repeated.
 elementNode.xmlObject = KeyValue()
class BaseFunction:
 'Base class for function-like elements; manages the local dictionary and the processor function stack.'
 def __init__(self, elementNode):
  'Initialize with the defining element, an empty local dictionary and the shared xml processor.'
  self.elementNode = elementNode
  self.localDictionary = {}
  self.xmlProcessor = elementNode.getXMLProcessor()
 def __repr__(self):
  'Get the string representation of this Class.'
  return str(self.__dict__)
 def getReturnValue(self):
  'Get the return value, popping this function off the processor stack afterwards.'
  # getReturnValueWithoutDeletion (defined by subclasses) pushes self onto
  # xmlProcessor.functions and sets self.returnValue.
  self.getReturnValueWithoutDeletion()
  del self.xmlProcessor.functions[-1]
  return self.returnValue
 def processChildNodes(self, elementNode):
  'Process childNodes until shouldReturn becomes true.'
  for childNode in elementNode.childNodes:
   # shouldReturn is set elsewhere (e.g. by a return element) to stop early.
   if self.shouldReturn:
    return
   self.xmlProcessor.processElementNode(childNode)
class ClassFunction(BaseFunction):
 'Function variant used for class methods; binds arguments before evaluation.'
 def getReturnValueByArguments(self, *arguments):
  'Bind the call arguments into the local dictionary, then get the return value.'
  setFunctionLocalDictionary(arguments, self)
  return self.getReturnValue()
 def getReturnValueWithoutDeletion(self):
  'Evaluate the function body without popping it off the processor function stack.'
  self.returnValue = None
  self.shouldReturn = False
  self.xmlProcessor.functions.append(self)
  self.processChildNodes(self.elementNode)
  return self.returnValue
class ClassObject:
 'Instance object for a class element: holds its attribute values and dispatches its methods.'
 def __init__(self, elementNode):
  'Initialize from the class element: shared method dictionary, per-instance attribute slots.'
  self.functionDictionary = elementNode.xmlObject.functionDictionary
  self.selfDictionary = {}
  for variable in elementNode.xmlObject.variables:
   self.selfDictionary[variable] = None
 def __repr__(self):
  'Get the string representation of this Class.'
  return str(self.__dict__)
 def _getAccessibleAttribute(self, attributeName):
  'Get an attribute value, or a callable bound to this instance for a method name; None otherwise.'
  if attributeName in self.selfDictionary:
   return self.selfDictionary[attributeName]
  if attributeName in self.functionDictionary:
   function = self.functionDictionary[attributeName]
   # Bind the shared function to this instance before handing out the call.
   function.classObject = self
   return function.getReturnValueByArguments
  return None
 def _setAccessibleAttribute(self, attributeName, value):
  'Set an attribute value; unknown names are silently ignored.'
  if attributeName in self.selfDictionary:
   self.selfDictionary[attributeName] = value
class EmptyObject:
 'A placeholder object carrying no attributes or behavior of its own.'
 def __init__(self):
  'Create the empty object; there is nothing to initialize.'
  pass
class Evaluator:
'Base evaluator class.'
def __init__(self, elementNode, word):
'Set value to none.'
self.value = None
self.word = word
def __repr__(self):
'Get the string representation of this Class.'
return str(self.__dict__)
def executeBracket( self, bracketBeginIndex, bracketEndIndex, evaluators ):
'Execute the bracket.'
pass
def executeCenterOperation(self, evaluators, evaluatorIndex):
'Execute operator which acts on the center.'
pass
def executeDictionary(self, dictionary, evaluators, keys, evaluatorIndex, nextEvaluator):
'Execute the dictionary.'
del evaluators[evaluatorIndex]
enumeratorKeys = euclidean.getEnumeratorKeys(dictionary, keys)
if enumeratorKeys.__class__ == list:
nextEvaluator.value = []
for enumeratorKey in enumeratorKeys:
if enumeratorKey in dictionary:
nextEvaluator.value.append(dictionary[enumeratorKey])
else:
print('Warning, key in executeKey in Evaluator in evaluate is not in for:')
print(enumeratorKey)
print(dictionary)
return
if enumeratorKeys in dictionary:
nextEvaluator.value = dictionary[enumeratorKeys]
else:
print('Warning, key in executeKey in Evaluator in evaluate is not in for:')
print(enumeratorKeys)
print(dictionary)
def executeFunction(self, evaluators, evaluatorIndex, nextEvaluator):
'Execute the function.'
pass
def executeKey(self, evaluators, keys, evaluatorIndex, nextEvaluator):
'Execute the key index.'
if self.value.__class__ == str:
self.executeString(evaluators, keys, evaluatorIndex, nextEvaluator)
return
if self.value.__class__ == list:
self.executeList(evaluators, keys, evaluatorIndex, nextEvaluator)
return
if self.value.__class__ == dict:
self.executeDictionary(self.value, evaluators, keys, evaluatorIndex, nextEvaluator)
return
getAccessibleDictionaryFunction = getattr(self.value, '_getAccessibleDictionary', None)
if getAccessibleDictionaryFunction != None:
self.executeDictionary(getAccessibleDictionaryFunction(), evaluators, keys, evaluatorIndex, nextEvaluator)
return
if self.value.__class__.__name__ != 'ElementNode':
return
del evaluators[evaluatorIndex]
enumeratorKeys = euclidean.getEnumeratorKeys(self.value.attributes, keys)
if enumeratorKeys.__class__ == list:
nextEvaluator.value = []
for enumeratorKey in enumeratorKeys:
if enumeratorKey in self.value.attributes:
nextEvaluator.value.append(getEvaluatedExpressionValue(self.value, self.value.attributes[enumeratorKey]))
else:
print('Warning, key in executeKey in Evaluator in evaluate is not in for:')
print(enumeratorKey)
print(self.value.attributes)
return
if enumeratorKeys in self.value.attributes:
nextEvaluator.value = getEvaluatedExpressionValue(self.value, self.value.attributes[enumeratorKeys])
else:
print('Warning, key in executeKey in Evaluator in evaluate is not in for:')
print(enumeratorKeys)
print(self.value.attributes)
def executeLeftOperation(self, evaluators, evaluatorIndex, operationLevel):
'Execute operator which acts from the left.'
pass
	def executeList(self, evaluators, keys, evaluatorIndex, nextEvaluator):
		'Execute the key index.'
		# Index self.value (a list) by the given keys; this evaluator is consumed
		# and the result is stored on the following evaluator.
		del evaluators[evaluatorIndex]
		enumeratorKeys = euclidean.getEnumeratorKeys(self.value, keys)
		if enumeratorKeys.__class__ == list:
			# Multiple indices yield a sub-list; out-of-range indices are skipped
			# with a warning.
			nextEvaluator.value = []
			for enumeratorKey in enumeratorKeys:
				intKey = euclidean.getIntFromValue(enumeratorKey)
				if self.getIsInRange(intKey):
					nextEvaluator.value.append(self.value[intKey])
				else:
					print('Warning, key in executeList in Evaluator in evaluate is not in for:')
					print(enumeratorKey)
					print(self.value)
			return
		intKey = euclidean.getIntFromValue(enumeratorKeys)
		if self.getIsInRange(intKey):
			nextEvaluator.value = self.value[intKey]
		else:
			print('Warning, key in executeList in Evaluator in evaluate is not in for:')
			print(enumeratorKeys)
			print(self.value)
def executePairOperation(self, evaluators, evaluatorIndex, operationLevel):
'Operate on two evaluators.'
pass
def executeRightOperation( self, evaluators, evaluatorIndex ):
'Execute operator which acts from the right.'
pass
	def executeString(self, evaluators, keys, evaluatorIndex, nextEvaluator):
		'Execute the string.'
		# Index self.value (a string) by the given keys; this evaluator is
		# consumed and the result is stored on the following evaluator.
		del evaluators[evaluatorIndex]
		enumeratorKeys = euclidean.getEnumeratorKeys(self.value, keys)
		if enumeratorKeys.__class__ == list:
			# Multiple indices yield the concatenation of the selected characters.
			nextEvaluator.value = ''
			for enumeratorKey in enumeratorKeys:
				intKey = euclidean.getIntFromValue(enumeratorKey)
				if self.getIsInRange(intKey):
					nextEvaluator.value += self.value[intKey]
				else:
					print('Warning, key in executeString in Evaluator in evaluate is not in for:')
					print(enumeratorKey)
					print(self.value)
			return
		intKey = euclidean.getIntFromValue(enumeratorKeys)
		if self.getIsInRange(intKey):
			nextEvaluator.value = self.value[intKey]
		else:
			print('Warning, key in executeString in Evaluator in evaluate is not in for:')
			print(enumeratorKeys)
			print(self.value)
def getIsInRange(self, keyIndex):
'Determine if the keyIndex is in range.'
if keyIndex == None:
return False
return keyIndex >= -len(self.value) and keyIndex < len(self.value)
class EvaluatorAddition(Evaluator):
	'Class to add two evaluators.'
	def executePair( self, evaluators, evaluatorIndex ):
		'Combine the evaluators on either side of the operator, storing the result in the left one and deleting the operator and right operand.'
		leftIndex = evaluatorIndex - 1
		rightIndex = evaluatorIndex + 1
		if leftIndex < 0:
			print('Warning, no leftKey in executePair in EvaluatorAddition for:')
			print(evaluators)
			print(evaluatorIndex)
			print(self)
			del evaluators[evaluatorIndex]
			return
		if rightIndex >= len(evaluators):
			print('Warning, no rightKey in executePair in EvaluatorAddition for:')
			print(evaluators)
			print(evaluatorIndex)
			print(self)
			del evaluators[evaluatorIndex]
			return
		# Removed an unused local (rightValue) that duplicated the argument below.
		evaluators[leftIndex].value = self.getOperationValue(evaluators[leftIndex].value, evaluators[rightIndex].value)
		del evaluators[ evaluatorIndex : evaluatorIndex + 2 ]
	def executePairOperation(self, evaluators, evaluatorIndex, operationLevel):
		'Operate on two evaluators.'
		# Addition runs at operation level 20.
		if operationLevel == 20:
			self.executePair(evaluators, evaluatorIndex)
	def getEvaluatedValues(self, enumerable, keys, value):
		'Broadcast the scalar value over each keyed element of the enumerable.'
		if enumerable.__class__ == dict:
			evaluatedValues = {}
			for key in keys:
				evaluatedValues[key] = self.getOperationValue(value, enumerable[key])
			return evaluatedValues
		evaluatedValues = []
		for key in keys:
			evaluatedValues.append(self.getOperationValue(value, enumerable[key]))
		return evaluatedValues
	def getOperationValue(self, leftValue, rightValue):
		'Get operation value.'
		# Scalars combine directly; a scalar with an enumerable broadcasts; two
		# enumerables must share the same keys and combine elementwise.
		leftKeys = getKeys(leftValue)
		rightKeys = getKeys(rightValue)
		if leftKeys == None and rightKeys == None:
			return self.getValueFromValuePair(leftValue, rightValue)
		if leftKeys == None:
			return self.getEvaluatedValues(rightValue, rightKeys, leftValue)
		if rightKeys == None:
			return self.getEvaluatedValues(leftValue, leftKeys, rightValue)
		leftKeys.sort(reverse=True)
		rightKeys.sort(reverse=True)
		if leftKeys != rightKeys:
			print('Warning, the leftKeys are different from the rightKeys in getOperationValue in EvaluatorAddition for:')
			print('leftValue')
			print(leftValue)
			print(leftKeys)
			print('rightValue')
			print(rightValue)
			print(rightKeys)
			print(self)
			return None
		if leftValue.__class__ == dict or rightValue.__class__ == dict:
			evaluatedValues = {}
			for leftKey in leftKeys:
				evaluatedValues[leftKey] = self.getOperationValue(leftValue[leftKey], rightValue[leftKey])
			return evaluatedValues
		evaluatedValues = []
		for leftKey in leftKeys:
			evaluatedValues.append(self.getOperationValue(leftValue[leftKey], rightValue[leftKey]))
		return evaluatedValues
	def getValueFromValuePair(self, leftValue, rightValue):
		'Add two values.'
		return leftValue + rightValue
class EvaluatorEqual(EvaluatorAddition):
	'Comparison evaluator for the == operator.'
	def executePairOperation(self, evaluators, evaluatorIndex, operationLevel):
		'Apply the comparison when the evaluation pass reaches level 15.'
		if operationLevel == 15:
			self.executePair(evaluators, evaluatorIndex)
	def getBooleanFromValuePair(self, leftValue, rightValue):
		'Return True when the two values are equal.'
		return leftValue == rightValue
	def getValueFromValuePair(self, leftValue, rightValue):
		'Delegate to the boolean comparison.'
		return self.getBooleanFromValuePair(leftValue, rightValue)
class EvaluatorSubtraction(EvaluatorAddition):
	'Class to subtract two evaluators.'
	def executeLeft( self, evaluators, evaluatorIndex ):
		'Minus the value to the right.'
		# Only act as a unary minus: if there is a value to the left this is a
		# binary subtraction, handled later via executePairOperation.
		leftIndex = evaluatorIndex - 1
		rightIndex = evaluatorIndex + 1
		leftValue = None
		if leftIndex >= 0:
			leftValue = evaluators[leftIndex].value
		if leftValue != None:
			return
		rightValue = evaluators[rightIndex].value
		if rightValue == None:
			print('Warning, can not minus.')
			print(evaluators[rightIndex].word)
		else:
			evaluators[rightIndex].value = self.getNegativeValue(rightValue)
		del evaluators[evaluatorIndex]
	def executeLeftOperation(self, evaluators, evaluatorIndex, operationLevel):
		'Minus the value to the right.'
		# Unary minus binds tightly, so it runs at operation level 200.
		if operationLevel == 200:
			self.executeLeft(evaluators, evaluatorIndex)
	def getNegativeValue( self, value ):
		'Get the negative value.'
		# Negate elementwise (in place) for enumerables, directly for scalars.
		keys = getKeys(value)
		if keys == None:
			return self.getValueFromSingleValue(value)
		for key in keys:
			value[key] = self.getNegativeValue(value[key])
		return value
	def getValueFromSingleValue( self, value ):
		'Minus value.'
		return -value
	def getValueFromValuePair(self, leftValue, rightValue):
		'Subtract two values.'
		return leftValue - rightValue
class EvaluatorAnd(EvaluatorAddition):
	'Logical evaluator for the and operator.'
	def executePairOperation(self, evaluators, evaluatorIndex, operationLevel):
		'Apply the conjunction when the evaluation pass reaches level 12.'
		if operationLevel == 12:
			self.executePair(evaluators, evaluatorIndex)
	def getBooleanFromValuePair(self, leftValue, rightValue):
		'Return the logical and of the two values.'
		return leftValue and rightValue
	def getValueFromValuePair(self, leftValue, rightValue):
		'Delegate to the boolean combination.'
		return self.getBooleanFromValuePair(leftValue, rightValue)
class EvaluatorAttribute(Evaluator):
	'Class to handle an attribute.'
	def executeFunction(self, evaluators, evaluatorIndex, nextEvaluator):
		'Execute the function.'
		executeNextEvaluatorArguments(self, evaluators, evaluatorIndex, nextEvaluator)
	def executeRightOperation( self, evaluators, evaluatorIndex ):
		'Resolve the .attribute word against the value of the previous evaluator.'
		attributeName = self.word[1 :]
		previousIndex = evaluatorIndex - 1
		previousEvaluator = evaluators[previousIndex]
		# Builtin enumerables get their accessible attributes from dedicated modules.
		if previousEvaluator.value.__class__ == dict:
			from fabmetheus_utilities.geometry.geometry_utilities.evaluate_enumerables import dictionary_attribute
			self.value = dictionary_attribute._getAccessibleAttribute(attributeName, previousEvaluator.value)
		elif previousEvaluator.value.__class__ == list:
			from fabmetheus_utilities.geometry.geometry_utilities.evaluate_enumerables import list_attribute
			self.value = list_attribute._getAccessibleAttribute(attributeName, previousEvaluator.value)
		elif previousEvaluator.value.__class__ == str:
			from fabmetheus_utilities.geometry.geometry_utilities.evaluate_enumerables import string_attribute
			self.value = string_attribute._getAccessibleAttribute(attributeName, previousEvaluator.value)
		else:
			# Other objects must expose _getAccessibleAttribute; follow the
			# dotted path one keyword at a time.
			attributeKeywords = attributeName.split('.')
			self.value = previousEvaluator.value
			for attributeKeyword in attributeKeywords:
				# Bug fix: the accessor was called unconditionally, so a missing
				# _getAccessibleAttribute raised TypeError (calling None) instead
				# of reaching the warning below.  Guard it and fall through.
				getAccessibleAttributeFunction = getattr(self.value, '_getAccessibleAttribute', None)
				if getAccessibleAttributeFunction == None:
					self.value = None
					break
				self.value = getAccessibleAttributeFunction(attributeKeyword)
		if self.value == None:
			print('Warning, EvaluatorAttribute in evaluate can not get a getAccessibleAttributeFunction for:')
			print(attributeName)
			print(previousEvaluator.value)
			print(self)
			return
		del evaluators[previousIndex]
class EvaluatorBracketCurly(Evaluator):
	'Class to evaluate a string.'
	def executeBracket(self, bracketBeginIndex, bracketEndIndex, evaluators):
		'Execute the bracket.'
		# Convert equation-style entries between the braces, then gather the
		# evaluated KeyValue pairs into a dictionary value on this evaluator.
		for evaluatorIndex in xrange(bracketEndIndex - 3, bracketBeginIndex, - 1):
			bracketEndIndex = getEndIndexConvertEquationValue(bracketEndIndex, evaluatorIndex, evaluators)
		evaluatedExpressionValueEvaluators = getBracketEvaluators(bracketBeginIndex, bracketEndIndex, evaluators)
		self.value = {}
		for evaluatedExpressionValueEvaluator in evaluatedExpressionValueEvaluators:
			keyValue = evaluatedExpressionValueEvaluator.value
			self.value[keyValue.key] = keyValue.value
		# Remove everything inside and including the closing brace.
		del evaluators[bracketBeginIndex + 1: bracketEndIndex + 1]
class EvaluatorBracketRound(Evaluator):
	'Class to evaluate a string.'
	def __init__(self, elementNode, word):
		'Set value to none.'
		# arguments holds the evaluated, comma-separated bracket contents.
		self.arguments = []
		self.value = None
		self.word = word
	def executeBracket( self, bracketBeginIndex, bracketEndIndex, evaluators ):
		'Execute the bracket.'
		self.arguments = getBracketValuesDeleteEvaluator(bracketBeginIndex, bracketEndIndex, evaluators)
		if len( self.arguments ) < 1:
			return
		# A single argument collapses to a scalar; several stay as a list.
		if len( self.arguments ) > 1:
			self.value = self.arguments
		else:
			self.value = self.arguments[0]
	def executeRightOperation( self, evaluators, evaluatorIndex ):
		'Evaluate the statement and delete the evaluators.'
		# Parentheses following a callable evaluator invoke it as a call.
		previousIndex = evaluatorIndex - 1
		if previousIndex < 0:
			return
		evaluators[ previousIndex ].executeFunction( evaluators, previousIndex, self )
class EvaluatorBracketSquare(Evaluator):
	'Class to evaluate a string.'
	def executeBracket( self, bracketBeginIndex, bracketEndIndex, evaluators ):
		'Execute the bracket.'
		# A square bracket always yields the list of its evaluated contents.
		self.value = getBracketValuesDeleteEvaluator(bracketBeginIndex, bracketEndIndex, evaluators)
	def executeRightOperation( self, evaluators, evaluatorIndex ):
		'Evaluate the statement and delete the evaluators.'
		# Square brackets following a value index into it via executeKey.
		previousIndex = evaluatorIndex - 1
		if previousIndex < 0:
			return
		if self.value.__class__ != list:
			return
		evaluators[ previousIndex ].executeKey( evaluators, self.value, previousIndex, self )
class EvaluatorClass(Evaluator):
	'Class evaluator class.'
	def __init__(self, elementNode, word):
		'Set value to none.'
		self.elementNode = elementNode
		self.value = None
		self.word = word
	def executeFunction(self, evaluators, evaluatorIndex, nextEvaluator):
		'Instantiate the class: build a ClassObject and run its _init function, if any, with the call arguments.'
		if self.elementNode.xmlObject == None:
			# Lazily parse the class element into its function/variable table.
			self.elementNode.xmlObject = FunctionVariable(self.elementNode)
		nextEvaluator.value = ClassObject(self.elementNode)
		# Removed an unused local (initializeFunction = None) that was never read.
		if '_init' in self.elementNode.xmlObject.functionDictionary:
			function = self.elementNode.xmlObject.functionDictionary['_init']
			function.classObject = nextEvaluator.value
			setFunctionLocalDictionary(nextEvaluator.arguments, function)
			function.getReturnValue()
		del evaluators[evaluatorIndex]
class EvaluatorComma(Evaluator):
	'Class to join two evaluators.'
	def executePairOperation(self, evaluators, evaluatorIndex, operationLevel):
		'Operate on two evaluators.'
		# Commas act at level 0.  A comma with nothing (or another comma) before
		# it marks a missing element and so becomes a None value; otherwise the
		# comma evaluator is simply dropped.
		if operationLevel != 0:
			return
		previousIndex = evaluatorIndex - 1
		if previousIndex < 0:
			evaluators[evaluatorIndex].value = None
			return
		if evaluators[previousIndex].word == ',':
			evaluators[evaluatorIndex].value = None
			return
		del evaluators[evaluatorIndex]
class EvaluatorConcatenate(Evaluator):
	'Class to join two evaluators.'
	def executePairOperation(self, evaluators, evaluatorIndex, operationLevel):
		'Operate on two evaluators.'
		# The || operator runs at level 80.  Supported pairs: list||list and
		# str||str concatenate, list||int repeats the list, dict||dict merges.
		# Any unsupported pair deletes both operands and the operator.
		if operationLevel != 80:
			return
		leftIndex = evaluatorIndex - 1
		if leftIndex < 0:
			del evaluators[evaluatorIndex]
			return
		rightIndex = evaluatorIndex + 1
		if rightIndex >= len(evaluators):
			del evaluators[ leftIndex : rightIndex ]
			return
		leftValue = evaluators[leftIndex].value
		rightValue = evaluators[rightIndex].value
		if leftValue.__class__ == rightValue.__class__ and (leftValue.__class__ == list or rightValue.__class__ == str):
			evaluators[leftIndex].value = leftValue + rightValue
			del evaluators[ evaluatorIndex : evaluatorIndex + 2 ]
			return
		if leftValue.__class__ == list and rightValue.__class__ == int:
			if rightValue > 0:
				# Repeat the list in place, rightValue times in total.
				originalList = leftValue[:]
				for copyIndex in xrange( rightValue - 1 ):
					leftValue += originalList
				evaluators[leftIndex].value = leftValue
				del evaluators[ evaluatorIndex : evaluatorIndex + 2 ]
				return
		if leftValue.__class__ == dict and rightValue.__class__ == dict:
			# Merge in place; right-hand entries win on key collisions.
			leftValue.update(rightValue)
			evaluators[leftIndex].value = leftValue
			del evaluators[ evaluatorIndex : evaluatorIndex + 2 ]
			return
		del evaluators[ leftIndex : evaluatorIndex + 2 ]
class EvaluatorDictionary(Evaluator):
	'Class to join two evaluators.'
	def executePairOperation(self, evaluators, evaluatorIndex, operationLevel):
		'Operate on two evaluators.'
		# The colon operator runs at level 10: it wraps the left and right
		# values into a KeyValue stored on the right evaluator, then removes
		# the left operand and the colon itself.
		if operationLevel != 10:
			return
		leftEvaluatorIndex = evaluatorIndex - 1
		if leftEvaluatorIndex < 0:
			print('Warning, leftEvaluatorIndex is less than zero in EvaluatorDictionary for:')
			print(self)
			print(evaluators)
			return
		rightEvaluatorIndex = evaluatorIndex + 1
		if rightEvaluatorIndex >= len(evaluators):
			print('Warning, rightEvaluatorIndex too high in EvaluatorDictionary for:')
			print(rightEvaluatorIndex)
			print(self)
			print(evaluators)
			return
		evaluators[rightEvaluatorIndex].value = KeyValue(evaluators[leftEvaluatorIndex].value, evaluators[rightEvaluatorIndex].value)
		del evaluators[ leftEvaluatorIndex : rightEvaluatorIndex ]
class EvaluatorDivision(EvaluatorAddition):
	'Arithmetic evaluator for the / operator.'
	def executePairOperation(self, evaluators, evaluatorIndex, operationLevel):
		'Apply the division when the evaluation pass reaches level 40.'
		if operationLevel == 40:
			self.executePair(evaluators, evaluatorIndex)
	def getValueFromValuePair(self, leftValue, rightValue):
		'Return leftValue divided by rightValue.'
		return leftValue / rightValue
class EvaluatorElement(Evaluator):
	'Element evaluator class.'
	def __init__(self, elementNode, word):
		'Set value to none.'
		self.elementNode = elementNode
		self.value = None
		self.word = word
	def executeCenterOperation(self, evaluators, evaluatorIndex):
		'Resolve a module.attribute word via the element plugin modules.'
		dotIndex = self.word.find('.')
		if dotIndex < 0:
			print('Warning, EvaluatorElement in evaluate can not find the dot for:')
			# Bug fix: this printed an undefined name (functionName), raising
			# NameError on the warning path; print the offending word instead.
			print(self.word)
			print(self)
			return
		attributeName = self.word[dotIndex + 1 :]
		moduleName = self.word[: dotIndex]
		# Cache each module's _getAccessibleAttribute so the plugin is loaded once.
		if moduleName in globalModuleFunctionsDictionary:
			self.value = globalModuleFunctionsDictionary[moduleName](attributeName, self.elementNode)
			return
		pluginModule = None
		if moduleName in globalElementNameSet:
			pluginModule = archive.getModuleWithPath(archive.getElementsPath(moduleName))
		if pluginModule == None:
			print('Warning, EvaluatorElement in evaluate can not get a pluginModule for:')
			print(moduleName)
			print(self)
			return
		getAccessibleAttributeFunction = pluginModule._getAccessibleAttribute
		globalModuleFunctionsDictionary[moduleName] = getAccessibleAttributeFunction
		self.value = getAccessibleAttributeFunction(attributeName, self.elementNode)
	def executeFunction(self, evaluators, evaluatorIndex, nextEvaluator):
		'Execute the function.'
		executeNextEvaluatorArguments(self, evaluators, evaluatorIndex, nextEvaluator)
class EvaluatorFalse(Evaluator):
	'Evaluator whose value is always the boolean False.'
	def __init__(self, elementNode, word):
		'Record the word and bind the constant False value.'
		self.word = word
		self.value = False
class EvaluatorFunction(Evaluator):
	'Function evaluator class.'
	def __init__(self, elementNode, word):
		'Set value to none.'
		self.elementNode = elementNode
		self.value = None
		self.word = word
	def executeFunction(self, evaluators, evaluatorIndex, nextEvaluator):
		'Execute the function.'
		if self.elementNode.xmlObject == None:
			# Lazily split the function's return expression into evaluator words.
			if 'return' in self.elementNode.attributes:
				value = self.elementNode.attributes['return']
				self.elementNode.xmlObject = getEvaluatorSplitWords(value)
			else:
				self.elementNode.xmlObject = []
		self.function = Function(self.elementNode )
		# Bind the call arguments as the function's local variables, then run it.
		setFunctionLocalDictionary(nextEvaluator.arguments, self.function)
		nextEvaluator.value = self.function.getReturnValue()
		del evaluators[evaluatorIndex]
class EvaluatorFundamental(Evaluator):
	'Fundamental evaluator class.'
	def executeCenterOperation(self, evaluators, evaluatorIndex):
		'Resolve a module.attribute word via the fundamental plugin modules.'
		dotIndex = self.word.find('.')
		if dotIndex < 0:
			print('Warning, EvaluatorFundamental in evaluate can not find the dot for:')
			# Bug fix: this printed an undefined name (functionName), raising
			# NameError on the warning path; print the offending word instead.
			print(self.word)
			print(self)
			return
		attributeName = self.word[dotIndex + 1 :]
		moduleName = self.word[: dotIndex]
		# Cache each fundamental module's _getAccessibleAttribute accessor.
		if moduleName in globalModuleFunctionsDictionary:
			self.value = globalModuleFunctionsDictionary[moduleName](attributeName)
			return
		pluginModule = None
		if moduleName in globalFundamentalNameSet:
			pluginModule = archive.getModuleWithPath(archive.getFundamentalsPath(moduleName))
		else:
			# Some fundamentals are stored with a leading underscore on disk.
			underscoredName = '_' + moduleName
			if underscoredName in globalFundamentalNameSet:
				pluginModule = archive.getModuleWithPath(archive.getFundamentalsPath(underscoredName))
		if pluginModule == None:
			print('Warning, EvaluatorFundamental in evaluate can not get a pluginModule for:')
			print(moduleName)
			print(self)
			return
		getAccessibleAttributeFunction = pluginModule._getAccessibleAttribute
		globalModuleFunctionsDictionary[moduleName] = getAccessibleAttributeFunction
		self.value = getAccessibleAttributeFunction(attributeName)
	def executeFunction(self, evaluators, evaluatorIndex, nextEvaluator):
		'Execute the function.'
		executeNextEvaluatorArguments(self, evaluators, evaluatorIndex, nextEvaluator)
class EvaluatorGreaterEqual( EvaluatorEqual ):
	'Comparison evaluator for the >= operator.'
	def getBooleanFromValuePair(self, leftValue, rightValue):
		'Return True when leftValue is at least rightValue.'
		return leftValue >= rightValue
class EvaluatorGreater( EvaluatorEqual ):
	'Comparison evaluator for the > operator.'
	def getBooleanFromValuePair(self, leftValue, rightValue):
		'Return True when leftValue exceeds rightValue.'
		return leftValue > rightValue
class EvaluatorLessEqual( EvaluatorEqual ):
	'Comparison evaluator for the <= operator.'
	def getBooleanFromValuePair(self, leftValue, rightValue):
		'Return True when leftValue is at most rightValue.'
		return leftValue <= rightValue
class EvaluatorLess( EvaluatorEqual ):
	'Comparison evaluator for the < operator.'
	def getBooleanFromValuePair(self, leftValue, rightValue):
		'Return True when leftValue is below rightValue.'
		return leftValue < rightValue
class EvaluatorLocal(EvaluatorElement):
	'Class to get a local variable.'
	def executeCenterOperation(self, evaluators, evaluatorIndex):
		'Execute operator which acts on the center.'
		# Resolve the word in the local dictionary of the innermost running
		# function, then follow any dotted attribute path.
		functions = self.elementNode.getXMLProcessor().functions
		if len(functions) < 1:
			print('Warning, there are no functions in EvaluatorLocal in evaluate for:')
			print(self.word)
			return
		attributeKeywords = self.word.split('.')
		self.value = functions[-1].localDictionary[attributeKeywords[0]]
		for attributeKeyword in attributeKeywords[1 :]:
			self.value = self.value._getAccessibleAttribute(attributeKeyword)
class EvaluatorModulo( EvaluatorDivision ):
	'Arithmetic evaluator for the % operator.'
	def getValueFromValuePair(self, leftValue, rightValue):
		'Return leftValue modulo rightValue.'
		return leftValue % rightValue
class EvaluatorMultiplication( EvaluatorDivision ):
	'Arithmetic evaluator for the * operator.'
	def getValueFromValuePair(self, leftValue, rightValue):
		'Return the product of the two values.'
		return leftValue * rightValue
class EvaluatorNone(Evaluator):
	'Evaluator whose value is always None.'
	def __init__(self, elementNode, word):
		'Record the stringified word; the value stays None.'
		self.word = str(word)
		self.value = None
class EvaluatorNot(EvaluatorSubtraction):
	'Unary evaluator for logical negation, reusing the unary-minus machinery.'
	def executeLeftOperation(self, evaluators, evaluatorIndex, operationLevel):
		'Negate the value to the right when the pass reaches level 13.'
		if operationLevel == 13:
			self.executeLeft(evaluators, evaluatorIndex)
	def getValueFromSingleValue( self, value ):
		'Return the logical negation of value.'
		return not value
class EvaluatorNotEqual( EvaluatorEqual ):
	'Comparison evaluator for the != operator.'
	def getBooleanFromValuePair(self, leftValue, rightValue):
		'Return True when the two values differ.'
		return leftValue != rightValue
class EvaluatorNumeric(Evaluator):
	'Class to evaluate a numeric literal.'
	def __init__(self, elementNode, word):
		'Parse word as a float when it contains a dot, otherwise as an int.'
		self.value = None
		self.word = word
		try:
			if '.' in word:
				self.value = float(word)
			else:
				self.value = int(word)
		except ValueError:
			# Bug fix: narrowed from a bare except, which also swallowed
			# KeyboardInterrupt/SystemExit and hid genuine programming errors.
			print('Warning, EvaluatorNumeric in evaluate could not get a numeric value for:')
			print(word)
			print(elementNode)
class EvaluatorOr( EvaluatorAnd ):
	'Logical evaluator for the or operator.'
	def getBooleanFromValuePair(self, leftValue, rightValue):
		'Return the logical or of the two values.'
		return leftValue or rightValue
class EvaluatorPower(EvaluatorAddition):
	'Arithmetic evaluator for the ** operator.'
	def executePairOperation(self, evaluators, evaluatorIndex, operationLevel):
		'Apply exponentiation when the evaluation pass reaches level 60.'
		if operationLevel == 60:
			self.executePair(evaluators, evaluatorIndex)
	def getValueFromValuePair(self, leftValue, rightValue):
		'Return leftValue raised to the rightValue power.'
		return leftValue ** rightValue
class EvaluatorSelf(EvaluatorElement):
	'Class to handle self.'
	def executeCenterOperation(self, evaluators, evaluatorIndex):
		'Execute operator which acts on the center.'
		# Resolve self against the classObject of the innermost running
		# function, then follow any dotted attribute path.
		functions = self.elementNode.getXMLProcessor().functions
		if len(functions) < 1:
			print('Warning, there are no functions in executeCenterOperation in EvaluatorSelf in evaluate for:')
			print(self.elementNode)
			return
		function = functions[-1]
		attributeKeywords = self.word.split('.')
		self.value = function.classObject
		for attributeKeyword in attributeKeywords[1 :]:
			self.value = self.value._getAccessibleAttribute(attributeKeyword)
class EvaluatorTrue(Evaluator):
	'Evaluator whose value is always the boolean True.'
	def __init__(self, elementNode, word):
		'Record the word and bind the constant True value.'
		self.word = word
		self.value = True
class EvaluatorValue(Evaluator):
	'Evaluator wrapping a literal value; the word is its string form.'
	def __init__(self, word):
		'Store the value and its stringified form.'
		self.word = str(word)
		self.value = word
class Function(BaseFunction):
	'Class to get equation results.'
	def __init__(self, elementNode):
		'Initialize.'
		self.elementNode = elementNode
		# xmlObject holds the pre-split return expression, parsed by the caller.
		self.evaluatorSplitLine = elementNode.xmlObject
		self.localDictionary = {}
		self.xmlProcessor = elementNode.getXMLProcessor()
	def getReturnValueWithoutDeletion(self):
		'Get return value without deleting last function.'
		self.returnValue = None
		# Push this function on the processor stack so locals resolve to it.
		self.xmlProcessor.functions.append(self)
		if len(self.evaluatorSplitLine) < 1:
			# No return expression: run the child statements for side effects.
			self.shouldReturn = False
			self.processChildNodes(self.elementNode)
		else:
			self.returnValue = getEvaluatedExpressionValueBySplitLine(self.elementNode, self.evaluatorSplitLine)
		return self.returnValue
class FunctionVariable:
	'Class to hold class functions and variable set.'
	def __init__(self, elementNode):
		'Initialize.'
		self.functionDictionary = {}
		self.variables = []
		self.processClass(elementNode)
	def addToVariableSet(self, elementNode):
		'Add to variables.'
		# Record variables assigned as 'self.<name>' in statement elements.
		setLocalAttribute(elementNode)
		keySplitLine = elementNode.xmlObject.key.split('.')
		if len(keySplitLine) == 2:
			if keySplitLine[0] == 'self':
				variable = keySplitLine[1]
				if variable not in self.variables:
					self.variables.append(variable)
	def processClass(self, elementNode):
		'Add class to FunctionVariable.'
		# Gather functions from this class, then recurse into the parent class
		# so inherited functions are found; child definitions take precedence.
		for childNode in elementNode.childNodes:
			self.processFunction(childNode)
		if 'parentNode' in elementNode.attributes:
			self.processClass(elementNode.getElementNodeByID(elementNode.attributes['parentNode']))
	def processFunction(self, elementNode):
		'Add function to function dictionary.'
		if elementNode.getNodeName() != 'function':
			return
		idKey = elementNode.attributes['id']
		if idKey in self.functionDictionary:
			# Already defined by a subclass; do not override with the parent's.
			return
		self.functionDictionary[idKey] = ClassFunction(elementNode)
		for childNode in elementNode.childNodes:
			self.processStatement(childNode)
	def processStatement(self, elementNode):
		'Add self statement to variables.'
		if elementNode.getNodeName() == 'statement':
			self.addToVariableSet(elementNode)
		for childNode in elementNode.childNodes:
			self.processStatement(childNode)
class KeyValue:
	'Container pairing a key with a value, parseable from delimited text.'
	def __init__(self, key=None, value=None):
		'Store the key and value.'
		self.key = key
		self.value = value
	def __repr__(self):
		'Return the attribute dictionary as a string.'
		return str(self.__dict__)
	def getByCharacter( self, character, line ):
		'Split line at the first occurrence of character; when absent, the whole line becomes the key and the value is None.'
		head, separator, tail = line.partition(character)
		if separator == '':
			self.key = line
			self.value = None
		else:
			self.key = head
			self.value = tail
		return self
	def getByDot(self, line):
		'Split line at the first dot.'
		return self.getByCharacter('.', line )
	def getByEqual(self, line):
		'Split line at the first equals sign.'
		return self.getByCharacter('=', line )
class ModuleElementNode:
	'Class to get the in attribute, the index name and the value name.'
	def __init__( self, elementNode):
		'Initialize.'
		self.conditionSplitWords = None
		self.elseElement = None
		if 'condition' in elementNode.attributes:
			self.conditionSplitWords = getEvaluatorSplitWords( elementNode.attributes['condition'] )
		else:
			print('Warning, could not find the condition attribute in ModuleElementNode in evaluate for:')
			print(elementNode)
			return
		if len( self.conditionSplitWords ) < 1:
			self.conditionSplitWords = None
			print('Warning, could not get split words for the condition attribute in ModuleElementNode in evaluate for:')
			print(elementNode)
		# Look at the next sibling: an else/elif element is attached to this
		# conditional so processElse can run it when the condition fails.
		nextIndex = getNextChildIndex(elementNode)
		if nextIndex >= len( elementNode.parentNode.childNodes ):
			return
		nextElementNode = elementNode.parentNode.childNodes[ nextIndex ]
		lowerLocalName = nextElementNode.getNodeName().lower()
		if lowerLocalName != 'else' and lowerLocalName != 'elif':
			return
		xmlProcessor = elementNode.getXMLProcessor()
		if lowerLocalName not in xmlProcessor.namePathDictionary:
			return
		self.pluginModule = archive.getModuleWithPath( xmlProcessor.namePathDictionary[ lowerLocalName ] )
		if self.pluginModule == None:
			return
		self.elseElement = nextElementNode
	def processElse(self, elementNode):
		'Process the else statement.'
		if self.elseElement != None:
			self.pluginModule.processElse( self.elseElement)
# Plugin dictionary for the geometry 'creation' folder.
globalCreationDictionary = archive.getGeometryDictionary('creation')
# Two-character operators, matched before single-character operators when splitting.
globalDictionaryOperatorBegin = {
	'||' : EvaluatorConcatenate,
	'==' : EvaluatorEqual,
	'>=' : EvaluatorGreaterEqual,
	'<=' : EvaluatorLessEqual,
	'!=' : EvaluatorNotEqual,
	'**' : EvaluatorPower }
# Maps a module prefix word to the evaluator class that resolves it.
globalModuleEvaluatorDictionary = {}
globalFundamentalNameSet = set(archive.getPluginFileNamesFromDirectoryPath(archive.getFundamentalsPath()))
addPrefixDictionary(globalModuleEvaluatorDictionary, globalFundamentalNameSet, EvaluatorFundamental)
globalElementNameSet = set(archive.getPluginFileNamesFromDirectoryPath(archive.getElementsPath()))
addPrefixDictionary(globalModuleEvaluatorDictionary, globalElementNameSet, EvaluatorElement)
globalModuleEvaluatorDictionary['self'] = EvaluatorSelf
# Single-character operator and bracket split table; closing brackets map to
# the inert base Evaluator.
globalSplitDictionaryOperator = {
	'+' : EvaluatorAddition,
	'{' : EvaluatorBracketCurly,
	'}' : Evaluator,
	'(' : EvaluatorBracketRound,
	')' : Evaluator,
	'[' : EvaluatorBracketSquare,
	']' : Evaluator,
	',' : EvaluatorComma,
	':' : EvaluatorDictionary,
	'/' : EvaluatorDivision,
	'>' : EvaluatorGreater,
	'<' : EvaluatorLess,
	'%' : EvaluatorModulo,
	'*' : EvaluatorMultiplication,
	'-' : EvaluatorSubtraction }
globalSplitDictionary = getSplitDictionary() # must be after globalSplitDictionaryOperator
| gpl-2.0 |
reclamador/python_selenium_astride | tests/test_selenium_astride.py | 1 | 3265 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_selenium_astride
----------------------------------
Tests for `selenium_astride` module.
"""
from flask import Flask, render_template, redirect, url_for, request, session, flash
from flask.ext.testing import LiveServerTestCase
from selenium import webdriver
import unittest
from tests import pages
class Entry(object):
    """Minimal blog entry holding a title and body text."""

    def __init__(self, title, text):
        """Store the entry's title and body text."""
        self.text = text
        self.title = title


entries = [Entry('Post1', 'Hello world')]
class SeleniumAstrideTest(LiveServerTestCase):
    # Page-object tests (tests.pages) run against a throwaway flaskr-style blog
    # served by LiveServerTestCase and driven by headless Chrome.
    def create_app(self):
        # Build the minimal blog app the live server will run.
        app = Flask(__name__)
        @app.route('/')
        def show_entries():
            return render_template('show_entries.html', entries=entries)
        @app.route('/add', methods=['POST'])
        def add_entry():
            entries.append(Entry(request.form['title'], request.form['text']))
            return redirect(url_for('show_entries'))
        @app.route('/login', methods=['GET', 'POST'])
        def login():
            error = None
            if request.method == 'POST':
                if request.form['username'] != app.config['USERNAME']:
                    error = 'Invalid username'
                elif request.form['password'] != app.config['PASSWORD']:
                    error = 'Invalid password'
                else:
                    session['logged_in'] = True
                    flash('You were logged in')
                    return redirect(url_for('show_entries'))
            return render_template('login.html', error=error)
        @app.route('/logout')
        def logout():
            session.pop('logged_in', None)
            flash('You were logged out')
            return redirect(url_for('show_entries'))
        app.config['USERNAME'] = 'floren'
        app.config['PASSWORD'] = 'astride'
        app.config['TESTING'] = True
        # Fixed port so get_server_url() is stable for the browser.
        app.config['LIVESERVER_PORT'] = 8943
        return app
    def setUp(self):
        # NOTE(review): create_app is also invoked by LiveServerTestCase itself;
        # this extra instance stored in self.app looks unused by the tests — confirm.
        self.app = self.create_app()
        options = webdriver.ChromeOptions()
        options.add_argument('headless')
        options.add_argument('disable-gpu')
        self.browser = webdriver.Chrome(options=options)
        # Poll up to 3 seconds for elements before selenium raises.
        self.browser.implicitly_wait(3)
    def tearDown(self):
        self.browser.quit()
    def test_check_element(self):
        # The home page object should expose the title of the seeded entry.
        self.browser.get(self.get_server_url())
        home_page = pages.HomePage(self.browser)
        first_entry_title = home_page.first_entry()
        self.assertEqual('Post1', first_entry_title)
    def test_click_link(self):
        # Following the login link from the home page lands on the login page.
        self.browser.get((self.get_server_url()))
        home_page = pages.HomePage(self.browser)
        home_page.go_login()
        login_page = pages.LoginPage(self.browser)
        self.assertEqual('Login', login_page.title_page())
    def test_page_elements(self):
        # Exercise the page-object element descriptors: set, clear and re-set
        # the username, then submit with a cleared password to force an error.
        self.browser.get(self.get_server_url() + '/login')
        login_page = pages.LoginPage(self.browser)
        login_page.username = "floren"
        login_page.username.clear()
        login_page.username = "floren"
        login_page.password = "astride"
        login_page.password.clear()
        login_page.login()
        self.assertIn('Invalid password', login_page.get_error())
if __name__ == '__main__':
    unittest.main()
| mit |
linjoahow/cd0505 | static/Brython3.1.1-20150328-091302/Lib/_abcoll.py | 688 | 5155 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.
DON'T USE THIS MODULE DIRECTLY! The classes here should be imported
via collections; they are defined here only to alleviate certain
bootstrapping issues. Unit tests are in test_collections.
"""
from abc import ABCMeta, abstractmethod
import sys
__all__ = ["Hashable", "Iterable", "Iterator",
"Sized", "Container", "Callable",
"Set", "MutableSet",
"Mapping", "MutableMapping",
"MappingView", "KeysView", "ItemsView", "ValuesView",
"Sequence", "MutableSequence",
"ByteString",
]
"""
### collection related types which are not exposed through builtin ###
## iterators ##
#fixme brython
#bytes_iterator = type(iter(b''))
bytes_iterator = type(iter(''))
#fixme brython
#bytearray_iterator = type(iter(bytearray()))
#callable_iterator = ???
dict_keyiterator = type(iter({}.keys()))
dict_valueiterator = type(iter({}.values()))
dict_itemiterator = type(iter({}.items()))
list_iterator = type(iter([]))
list_reverseiterator = type(iter(reversed([])))
range_iterator = type(iter(range(0)))
set_iterator = type(iter(set()))
str_iterator = type(iter(""))
tuple_iterator = type(iter(()))
zip_iterator = type(iter(zip()))
## views ##
dict_keys = type({}.keys())
dict_values = type({}.values())
dict_items = type({}.items())
## misc ##
dict_proxy = type(type.__dict__)
"""
# NOTE(review): deliberately shadows the abc.abstractmethod imported above with
# an identity decorator, so the shim ABCs below work without ABCMeta under Brython.
def abstractmethod(self):
    'Identity decorator: return the function unchanged.'
    return self
### ONE-TRICK PONIES ###
#class Iterable(metaclass=ABCMeta):
class Iterable:
    # ABC for objects implementing __iter__.  The metaclass is commented out
    # for Brython bootstrapping, so abstractmethod here is the identity shim.
    @abstractmethod
    def __iter__(self):
        while False:
            yield None

    @classmethod
    def __subclasshook__(cls, C):
        # Duck-typed issubclass: any class with __iter__ in its MRO qualifies.
        if cls is Iterable:
            if any("__iter__" in B.__dict__ for B in C.__mro__):
                return True
        return NotImplemented
#class Sized(metaclass=ABCMeta):
class Sized:
    # ABC for objects implementing __len__.
    @abstractmethod
    def __len__(self):
        return 0

    @classmethod
    def __subclasshook__(cls, C):
        # Duck-typed issubclass: any class with __len__ in its MRO qualifies.
        if cls is Sized:
            if any("__len__" in B.__dict__ for B in C.__mro__):
                return True
        return NotImplemented
#class Container(metaclass=ABCMeta):
class Container:
    # ABC for objects implementing __contains__.
    @abstractmethod
    def __contains__(self, x):
        return False

    @classmethod
    def __subclasshook__(cls, C):
        # Duck-typed issubclass: any class with __contains__ in its MRO qualifies.
        if cls is Container:
            if any("__contains__" in B.__dict__ for B in C.__mro__):
                return True
        return NotImplemented
### MAPPINGS ###
class Mapping(Sized, Iterable, Container):
    """Read-only mapping ABC.

    Concrete subclasses supply ``__getitem__`` (plus ``__len__`` and
    ``__iter__`` from the bases); everything else is derived from those.
    """

    @abstractmethod
    def __getitem__(self, key):
        raise KeyError

    def get(self, key, default=None):
        """Return ``self[key]``, or *default* when the key is absent."""
        try:
            return self[key]
        except KeyError:
            return default

    def __contains__(self, key):
        # Membership is defined by a successful lookup (EAFP).
        try:
            self[key]
        except KeyError:
            return False
        return True

    def keys(self):
        """Return a new view of the mapping's keys."""
        return KeysView(self)

    def items(self):
        """Return a new view of the mapping's (key, value) pairs."""
        return ItemsView(self)

    def values(self):
        """Return a new view of the mapping's values."""
        return ValuesView(self)

    def __eq__(self, other):
        # Two mappings are equal when their item sets match; defer to
        # the other operand for non-mappings.
        if isinstance(other, Mapping):
            return dict(self.items()) == dict(other.items())
        return NotImplemented

    def __ne__(self, other):
        return not (self == other)
class MutableMapping(Mapping):
    """Mutable mapping ABC.

    Concrete subclasses additionally supply ``__setitem__`` and
    ``__delitem__``; the remaining mutators are derived from those.
    """

    @abstractmethod
    def __setitem__(self, key, value):
        raise KeyError

    @abstractmethod
    def __delitem__(self, key):
        raise KeyError

    # Unique sentinel so pop() can distinguish "no default supplied"
    # from an explicit default of None.
    __marker = object()

    def pop(self, key, default=__marker):
        """Remove *key* and return its value; return *default* (or
        raise KeyError when no default was given) if absent."""
        try:
            value = self[key]
        except KeyError:
            if default is self.__marker:
                raise
            return default
        del self[key]
        return value

    def popitem(self):
        """Remove and return an arbitrary (key, value) pair; raise
        KeyError when the mapping is empty."""
        it = iter(self)
        try:
            key = next(it)
        except StopIteration:
            raise KeyError
        value = self[key]
        del self[key]
        return key, value

    def clear(self):
        """Remove every item, via repeated popitem()."""
        while True:
            try:
                self.popitem()
            except KeyError:
                break

    def update(*args, **kwds):
        """Update self from a mapping / iterable of pairs and keyword
        arguments. ``self`` is taken positionally from *args so that
        'self' remains usable as a keyword key."""
        if len(args) > 2:
            raise TypeError("update() takes at most 2 positional "
                            "arguments ({} given)".format(len(args)))
        elif not args:
            raise TypeError("update() takes at least 1 argument (0 given)")
        self = args[0]
        other = args[1] if len(args) >= 2 else ()
        if isinstance(other, Mapping):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, "keys"):
            # dict-like object that is not registered as a Mapping
            for key in other.keys():
                self[key] = other[key]
        else:
            # iterable of (key, value) pairs
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value

    def setdefault(self, key, default=None):
        """Return ``self[key]``; when absent, insert *default* first
        and return it."""
        try:
            return self[key]
        except KeyError:
            self[key] = default
        return default
#MutableMapping.register(dict)
| agpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.